Columns (name: type, observed value range or number of distinct values):

file_name: int64, 0 to 72.3k
vulnerable_line_numbers: string, lengths 1 to 1.06k
dataset_type: string, 1 distinct value
commit_hash: string, lengths 40 to 44
unique_id: int64, 0 to 271k
project: string, 10 distinct values
target: int64, 0 to 1
repo_url: string, 10 distinct values
date: string, length 25
code: string, lengths 0 to 20.4M
CVE: string, lengths 13 to 43
CWE: string, 50 distinct values
commit_link: string, lengths 73 to 97
severity: string, 4 distinct values
__index_level_0__: int64, 0 to 124k
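The columns above describe a flat table, so any dataframe library can work with an exported copy of the split. Below is a minimal sketch for checking that a local export matches this schema; the file name train_val.parquet is an assumption, since the listing does not say where the data is stored.

```python
import pandas as pd

# Hypothetical local export of the split shown here; the listing does not
# give the actual file name or hub location, so adjust the path as needed.
df = pd.read_parquet("train_val.parquet")

# The column names below are the ones listed in the schema above.
expected = [
    "file_name", "vulnerable_line_numbers", "dataset_type", "commit_hash",
    "unique_id", "project", "target", "repo_url", "date", "code",
    "CVE", "CWE", "commit_link", "severity", "__index_level_0__",
]
print(sorted(df.columns) == sorted(expected))

# Inspect one record without dumping the (potentially multi-megabyte) code field.
print(df.drop(columns=["code"]).iloc[0])
```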
Row 1:
file_name: 18,105
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
unique_id: 18,105
project: Chrome
target: 0
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
code:
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/policy/core/common/cloud/cloud_policy_service.h" #include <stddef.h> #include "base/callback.h" #include "base/logging.h" #include "base/metrics/histogram_macros.h" #include "base/time/time.h" #include "components/policy/core/common/cloud/cloud_policy_constants.h" #include "components/policy/proto/device_management_backend.pb.h" namespace em = enterprise_management; namespace policy { CloudPolicyService::CloudPolicyService(const std::string& policy_type, const std::string& settings_entity_id, CloudPolicyClient* client, CloudPolicyStore* store) : policy_type_(policy_type), settings_entity_id_(settings_entity_id), client_(client), store_(store), refresh_state_(REFRESH_NONE), unregister_state_(UNREGISTER_NONE), initialization_complete_(false) { client_->AddPolicyTypeToFetch(policy_type_, settings_entity_id_); client_->AddObserver(this); store_->AddObserver(this); // Make sure we initialize |client_| from the policy data that might be // already present in |store_|. OnStoreLoaded(store_); } CloudPolicyService::~CloudPolicyService() { client_->RemovePolicyTypeToFetch(policy_type_, settings_entity_id_); client_->RemoveObserver(this); store_->RemoveObserver(this); } std::string CloudPolicyService::ManagedBy() const { const em::PolicyData* policy = store_->policy(); if (policy) { std::string username = policy->username(); std::size_t pos = username.find('@'); if (pos != std::string::npos) return username.substr(pos + 1); } return std::string(); } void CloudPolicyService::RefreshPolicy(const RefreshPolicyCallback& callback) { // If the client is not registered or is unregistering, bail out. if (!client_->is_registered() || unregister_state_ != UNREGISTER_NONE) { callback.Run(false); return; } // Else, trigger a refresh. refresh_callbacks_.push_back(callback); refresh_state_ = REFRESH_POLICY_FETCH; client_->FetchPolicy(); } void CloudPolicyService::Unregister(const UnregisterCallback& callback) { // Abort all pending refresh requests. if (refresh_state_ != REFRESH_NONE) RefreshCompleted(false); // Abort previous unregister request if any. if (unregister_state_ != UNREGISTER_NONE) UnregisterCompleted(false); unregister_callback_ = callback; unregister_state_ = UNREGISTER_PENDING; client_->Unregister(); } void CloudPolicyService::OnPolicyFetched(CloudPolicyClient* client) { if (client_->status() != DM_STATUS_SUCCESS) { RefreshCompleted(false); return; } const em::PolicyFetchResponse* policy = client_->GetPolicyFor(policy_type_, settings_entity_id_); if (policy) { if (refresh_state_ != REFRESH_NONE) refresh_state_ = REFRESH_POLICY_STORE; store_->Store(*policy, client->fetched_invalidation_version()); } else { RefreshCompleted(false); } } void CloudPolicyService::OnRegistrationStateChanged(CloudPolicyClient* client) { if (unregister_state_ == UNREGISTER_PENDING) UnregisterCompleted(true); } void CloudPolicyService::OnClientError(CloudPolicyClient* client) { if (refresh_state_ == REFRESH_POLICY_FETCH) RefreshCompleted(false); if (unregister_state_ == UNREGISTER_PENDING) UnregisterCompleted(false); } void CloudPolicyService::OnStoreLoaded(CloudPolicyStore* store) { // Update the client with state from the store. const em::PolicyData* policy(store_->policy()); // Timestamp. 
base::Time policy_timestamp; if (policy && policy->has_timestamp()) policy_timestamp = base::Time::FromJavaTime(policy->timestamp()); const base::Time& old_timestamp = client_->last_policy_timestamp(); if (!policy_timestamp.is_null() && !old_timestamp.is_null() && policy_timestamp != old_timestamp) { const base::TimeDelta age = policy_timestamp - old_timestamp; // TODO(zmin): add UMA for new policy type. if (policy_type_ == dm_protocol::kChromeUserPolicyType) { UMA_HISTOGRAM_CUSTOM_COUNTS("Enterprise.PolicyUpdatePeriod.User", age.InDays(), 1, 1000, 100); } else if (policy_type_ == dm_protocol::kChromeDevicePolicyType) { UMA_HISTOGRAM_CUSTOM_COUNTS("Enterprise.PolicyUpdatePeriod.Device", age.InDays(), 1, 1000, 100); } } client_->set_last_policy_timestamp(policy_timestamp); // Public key version. if (policy && policy->has_public_key_version()) client_->set_public_key_version(policy->public_key_version()); else client_->clear_public_key_version(); // Finally, set up registration if necessary. if (policy && policy->has_request_token() && policy->has_device_id() && !client_->is_registered()) { DVLOG(1) << "Setting up registration with request token: " << policy->request_token(); std::vector<std::string> user_affiliation_ids( policy->user_affiliation_ids().begin(), policy->user_affiliation_ids().end()); client_->SetupRegistration(policy->request_token(), policy->device_id(), user_affiliation_ids); } if (refresh_state_ == REFRESH_POLICY_STORE) RefreshCompleted(true); CheckInitializationCompleted(); } void CloudPolicyService::OnStoreError(CloudPolicyStore* store) { if (refresh_state_ == REFRESH_POLICY_STORE) RefreshCompleted(false); CheckInitializationCompleted(); } void CloudPolicyService::CheckInitializationCompleted() { if (!IsInitializationComplete() && store_->is_initialized()) { initialization_complete_ = true; for (auto& observer : observers_) observer.OnInitializationCompleted(this); } } void CloudPolicyService::RefreshCompleted(bool success) { // Clear state and |refresh_callbacks_| before actually invoking them, s.t. // triggering new policy fetches behaves as expected. std::vector<RefreshPolicyCallback> callbacks; callbacks.swap(refresh_callbacks_); refresh_state_ = REFRESH_NONE; for (std::vector<RefreshPolicyCallback>::iterator callback(callbacks.begin()); callback != callbacks.end(); ++callback) { callback->Run(success); } } void CloudPolicyService::UnregisterCompleted(bool success) { if (!success) LOG(ERROR) << "Unregister request failed."; unregister_state_ = UNREGISTER_NONE; unregister_callback_.Run(success); unregister_callback_ = UnregisterCallback(); // Reset. } void CloudPolicyService::AddObserver(Observer* observer) { observers_.AddObserver(observer); } void CloudPolicyService::RemoveObserver(Observer* observer) { observers_.RemoveObserver(observer); } } // namespace policy
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 14,968

Row 2:
file_name: 30,252
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
unique_id: 195,247
project: linux
target: 0
repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
code:
/** * udc.c - Core UDC Framework * * Copyright (C) 2016 Intel Corporation * Author: Felipe Balbi <felipe.balbi@linux.intel.com> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 of * the License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #undef TRACE_SYSTEM #define TRACE_SYSTEM gadget #if !defined(__UDC_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) #define __UDC_TRACE_H #include <linux/types.h> #include <linux/tracepoint.h> #include <asm/byteorder.h> #include <linux/usb/gadget.h> DECLARE_EVENT_CLASS(udc_log_gadget, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret), TP_STRUCT__entry( __field(enum usb_device_speed, speed) __field(enum usb_device_speed, max_speed) __field(enum usb_device_state, state) __field(unsigned, mA) __field(unsigned, sg_supported) __field(unsigned, is_otg) __field(unsigned, is_a_peripheral) __field(unsigned, b_hnp_enable) __field(unsigned, a_hnp_support) __field(unsigned, hnp_polling_support) __field(unsigned, host_request_flag) __field(unsigned, quirk_ep_out_aligned_size) __field(unsigned, quirk_altset_not_supp) __field(unsigned, quirk_stall_not_supp) __field(unsigned, quirk_zlp_not_supp) __field(unsigned, is_selfpowered) __field(unsigned, deactivated) __field(unsigned, connected) __field(int, ret) ), TP_fast_assign( __entry->speed = g->speed; __entry->max_speed = g->max_speed; __entry->state = g->state; __entry->mA = g->mA; __entry->sg_supported = g->sg_supported; __entry->is_otg = g->is_otg; __entry->is_a_peripheral = g->is_a_peripheral; __entry->b_hnp_enable = g->b_hnp_enable; __entry->a_hnp_support = g->a_hnp_support; __entry->hnp_polling_support = g->hnp_polling_support; __entry->host_request_flag = g->host_request_flag; __entry->quirk_ep_out_aligned_size = g->quirk_ep_out_aligned_size; __entry->quirk_altset_not_supp = g->quirk_altset_not_supp; __entry->quirk_stall_not_supp = g->quirk_stall_not_supp; __entry->quirk_zlp_not_supp = g->quirk_zlp_not_supp; __entry->is_selfpowered = g->is_selfpowered; __entry->deactivated = g->deactivated; __entry->connected = g->connected; __entry->ret = ret; ), TP_printk("speed %d/%d state %d %dmA [%s%s%s%s%s%s%s%s%s%s%s%s%s%s] --> %d", __entry->speed, __entry->max_speed, __entry->state, __entry->mA, __entry->sg_supported ? "sg:" : "", __entry->is_otg ? "OTG:" : "", __entry->is_a_peripheral ? "a_peripheral:" : "", __entry->b_hnp_enable ? "b_hnp:" : "", __entry->a_hnp_support ? "a_hnp:" : "", __entry->hnp_polling_support ? "hnp_poll:" : "", __entry->host_request_flag ? "hostreq:" : "", __entry->quirk_ep_out_aligned_size ? "out_aligned:" : "", __entry->quirk_altset_not_supp ? "no_altset:" : "", __entry->quirk_stall_not_supp ? "no_stall:" : "", __entry->quirk_zlp_not_supp ? "no_zlp" : "", __entry->is_selfpowered ? "self-powered:" : "bus-powered:", __entry->deactivated ? "deactivated:" : "activated:", __entry->connected ? 
"connected" : "disconnected", __entry->ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_frame_number, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_wakeup, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_set_selfpowered, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_clear_selfpowered, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_connect, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_draw, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_disconnect, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_connect, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_disconnect, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_deactivate, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_activate, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DECLARE_EVENT_CLASS(udc_log_ep, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret), TP_STRUCT__entry( __dynamic_array(char, name, UDC_TRACE_STR_MAX) __field(unsigned, maxpacket) __field(unsigned, maxpacket_limit) __field(unsigned, max_streams) __field(unsigned, mult) __field(unsigned, maxburst) __field(u8, address) __field(bool, claimed) __field(bool, enabled) __field(int, ret) ), TP_fast_assign( snprintf(__get_str(name), UDC_TRACE_STR_MAX, "%s", ep->name); __entry->maxpacket = ep->maxpacket; __entry->maxpacket_limit = ep->maxpacket_limit; __entry->max_streams = ep->max_streams; __entry->mult = ep->mult; __entry->maxburst = ep->maxburst; __entry->address = ep->address, __entry->claimed = ep->claimed; __entry->enabled = ep->enabled; __entry->ret = ret; ), TP_printk("%s: mps %d/%d streams %d mult %d burst %d addr %02x %s%s --> %d", __get_str(name), __entry->maxpacket, __entry->maxpacket_limit, __entry->max_streams, __entry->mult, __entry->maxburst, __entry->address, __entry->claimed ? "claimed:" : "released:", __entry->enabled ? 
"enabled" : "disabled", ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_set_maxpacket_limit, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_enable, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_disable, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_set_halt, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_clear_halt, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_set_wedge, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_fifo_status, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_fifo_flush, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DECLARE_EVENT_CLASS(udc_log_req, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret), TP_STRUCT__entry( __dynamic_array(char, name, UDC_TRACE_STR_MAX) __field(unsigned, length) __field(unsigned, actual) __field(unsigned, num_sgs) __field(unsigned, num_mapped_sgs) __field(unsigned, stream_id) __field(unsigned, no_interrupt) __field(unsigned, zero) __field(unsigned, short_not_ok) __field(int, status) __field(int, ret) ), TP_fast_assign( snprintf(__get_str(name), UDC_TRACE_STR_MAX, "%s", ep->name); __entry->length = req->length; __entry->actual = req->actual; __entry->num_sgs = req->num_sgs; __entry->num_mapped_sgs = req->num_mapped_sgs; __entry->stream_id = req->stream_id; __entry->no_interrupt = req->no_interrupt; __entry->zero = req->zero; __entry->short_not_ok = req->short_not_ok; __entry->status = req->status; __entry->ret = ret; ), TP_printk("%s: length %d/%d sgs %d/%d stream %d %s%s%s status %d --> %d", __get_str(name), __entry->actual, __entry->length, __entry->num_mapped_sgs, __entry->num_sgs, __entry->stream_id, __entry->zero ? "Z" : "z", __entry->short_not_ok ? "S" : "s", __entry->no_interrupt ? "i" : "I", __entry->status, __entry->ret ) ); DEFINE_EVENT(udc_log_req, usb_ep_alloc_request, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret) ); DEFINE_EVENT(udc_log_req, usb_ep_free_request, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret) ); DEFINE_EVENT(udc_log_req, usb_ep_queue, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret) ); DEFINE_EVENT(udc_log_req, usb_ep_dequeue, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret) ); DEFINE_EVENT(udc_log_req, usb_gadget_giveback_request, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret) ); #endif /* __UDC_TRACE_H */ /* this part has to be here */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE trace #include <trace/define_trace.h>
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 103,594
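The two fields that drive most downstream use are project (which of the 10 source codebases a sample comes from) and target (a 0/1 label; both rows above are 0 with null CVE/CWE, and the convention that 1 marks the vulnerable version is an assumption here, not something the listing states). A short sketch of slicing along those columns, reusing the hypothetical Parquet export from above:

```python
import pandas as pd

df = pd.read_parquet("train_val.parquet")  # hypothetical export, as above

# Per-project counts of the two label values. The reading that 1 means
# "vulnerable" and 0 means "non-vulnerable" is an assumption.
counts = df.groupby(["project", "target"]).size().unstack(fill_value=0)
print(counts)

# Rows that carry a CVE identifier, e.g. to cross-tabulate CWE and severity.
with_cve = df[df["CVE"].notna()]
print(with_cve[["project", "CVE", "CWE", "severity"]].head())
```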

Row 3:
file_name: 19,830
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
unique_id: 184,825
project: linux
target: 0
repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
code:
/* * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet * driver for Linux. * * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/debugfs.h> #include <linux/ethtool.h> #include <linux/mdio.h> #include "t4vf_common.h" #include "t4vf_defs.h" #include "../cxgb4/t4_regs.h" #include "../cxgb4/t4_msg.h" /* * Generic information about the driver. */ #define DRV_VERSION "2.0.0-ko" #define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver" /* * Module Parameters. * ================== */ /* * Default ethtool "message level" for adapters. */ #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) /* * The driver uses the best interrupt scheme available on a platform in the * order MSI-X then MSI. This parameter determines which of these schemes the * driver may consider as follows: * * msi = 2: choose from among MSI-X and MSI * msi = 1: only consider MSI interrupts * * Note that unlike the Physical Function driver, this Virtual Function driver * does _not_ support legacy INTx interrupts (this limitation is mandated by * the PCI-E SR-IOV standard). */ #define MSI_MSIX 2 #define MSI_MSI 1 #define MSI_DEFAULT MSI_MSIX static int msi = MSI_DEFAULT; module_param(msi, int, 0644); MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI"); /* * Fundamental constants. * ====================== */ enum { MAX_TXQ_ENTRIES = 16384, MAX_RSPQ_ENTRIES = 16384, MAX_RX_BUFFERS = 16384, MIN_TXQ_ENTRIES = 32, MIN_RSPQ_ENTRIES = 128, MIN_FL_ENTRIES = 16, /* * For purposes of manipulating the Free List size we need to * recognize that Free Lists are actually Egress Queues (the host * produces free buffers which the hardware consumes), Egress Queues * indices are all in units of Egress Context Units bytes, and free * list entries are 64-bit PCI DMA addresses. 
And since the state of * the Producer Index == the Consumer Index implies an EMPTY list, we * always have at least one Egress Unit's worth of Free List entries * unused. See sge.c for more details ... */ EQ_UNIT = SGE_EQ_IDXSIZE, FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), MIN_FL_RESID = FL_PER_EQ_UNIT, }; /* * Global driver state. * ==================== */ static struct dentry *cxgb4vf_debugfs_root; /* * OS "Callback" functions. * ======================== */ /* * The link status has changed on the indicated "port" (Virtual Interface). */ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) { struct net_device *dev = adapter->port[pidx]; /* * If the port is disabled or the current recorded "link up" * status matches the new status, just return. */ if (!netif_running(dev) || link_ok == netif_carrier_ok(dev)) return; /* * Tell the OS that the link status has changed and print a short * informative message on the console about the event. */ if (link_ok) { const char *s; const char *fc; const struct port_info *pi = netdev_priv(dev); netif_carrier_on(dev); switch (pi->link_cfg.speed) { case 100: s = "100Mbps"; break; case 1000: s = "1Gbps"; break; case 10000: s = "10Gbps"; break; case 25000: s = "25Gbps"; break; case 40000: s = "40Gbps"; break; case 100000: s = "100Gbps"; break; default: s = "unknown"; break; } switch (pi->link_cfg.fc) { case PAUSE_RX: fc = "RX"; break; case PAUSE_TX: fc = "TX"; break; case PAUSE_RX|PAUSE_TX: fc = "RX/TX"; break; default: fc = "no"; break; } netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc); } else { netif_carrier_off(dev); netdev_info(dev, "link down\n"); } } /* * THe port module type has changed on the indicated "port" (Virtual * Interface). */ void t4vf_os_portmod_changed(struct adapter *adapter, int pidx) { static const char * const mod_str[] = { NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM" }; const struct net_device *dev = adapter->port[pidx]; const struct port_info *pi = netdev_priv(dev); if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) dev_info(adapter->pdev_dev, "%s: port module unplugged\n", dev->name); else if (pi->mod_type < ARRAY_SIZE(mod_str)) dev_info(adapter->pdev_dev, "%s: %s port module inserted\n", dev->name, mod_str[pi->mod_type]); else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) dev_info(adapter->pdev_dev, "%s: unsupported optical port " "module inserted\n", dev->name); else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) dev_info(adapter->pdev_dev, "%s: unknown port module inserted," "forcing TWINAX\n", dev->name); else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR) dev_info(adapter->pdev_dev, "%s: transceiver module error\n", dev->name); else dev_info(adapter->pdev_dev, "%s: unknown module type %d " "inserted\n", dev->name, pi->mod_type); } /* * Net device operations. * ====================== */ /* * Perform the MAC and PHY actions needed to enable a "port" (Virtual * Interface). */ static int link_start(struct net_device *dev) { int ret; struct port_info *pi = netdev_priv(dev); /* * We do not set address filters and promiscuity here, the stack does * that step explicitly. Enable vlan accel. */ ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1, true); if (ret == 0) { ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt, dev->dev_addr, true); if (ret >= 0) { pi->xact_addr_filt = ret; ret = 0; } } /* * We don't need to actually "start the link" itself since the * firmware will do that for us when the first Virtual Interface * is enabled on a port. 
*/ if (ret == 0) ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true); return ret; } /* * Name the MSI-X interrupts. */ static void name_msix_vecs(struct adapter *adapter) { int namelen = sizeof(adapter->msix_info[0].desc) - 1; int pidx; /* * Firmware events. */ snprintf(adapter->msix_info[MSIX_FW].desc, namelen, "%s-FWeventq", adapter->name); adapter->msix_info[MSIX_FW].desc[namelen] = 0; /* * Ethernet queues. */ for_each_port(adapter, pidx) { struct net_device *dev = adapter->port[pidx]; const struct port_info *pi = netdev_priv(dev); int qs, msi; for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) { snprintf(adapter->msix_info[msi].desc, namelen, "%s-%d", dev->name, qs); adapter->msix_info[msi].desc[namelen] = 0; } } } /* * Request all of our MSI-X resources. */ static int request_msix_queue_irqs(struct adapter *adapter) { struct sge *s = &adapter->sge; int rxq, msi, err; /* * Firmware events. */ err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix, 0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq); if (err) return err; /* * Ethernet queues. */ msi = MSIX_IQFLINT; for_each_ethrxq(s, rxq) { err = request_irq(adapter->msix_info[msi].vec, t4vf_sge_intr_msix, 0, adapter->msix_info[msi].desc, &s->ethrxq[rxq].rspq); if (err) goto err_free_irqs; msi++; } return 0; err_free_irqs: while (--rxq >= 0) free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq); free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq); return err; } /* * Free our MSI-X resources. */ static void free_msix_queue_irqs(struct adapter *adapter) { struct sge *s = &adapter->sge; int rxq, msi; free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq); msi = MSIX_IQFLINT; for_each_ethrxq(s, rxq) free_irq(adapter->msix_info[msi++].vec, &s->ethrxq[rxq].rspq); } /* * Turn on NAPI and start up interrupts on a response queue. */ static void qenable(struct sge_rspq *rspq) { napi_enable(&rspq->napi); /* * 0-increment the Going To Sleep register to start the timer and * enable interrupts. */ t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, CIDXINC_V(0) | SEINTARM_V(rspq->intr_params) | INGRESSQID_V(rspq->cntxt_id)); } /* * Enable NAPI scheduling and interrupt generation for all Receive Queues. */ static void enable_rx(struct adapter *adapter) { int rxq; struct sge *s = &adapter->sge; for_each_ethrxq(s, rxq) qenable(&s->ethrxq[rxq].rspq); qenable(&s->fw_evtq); /* * The interrupt queue doesn't use NAPI so we do the 0-increment of * its Going To Sleep register here to get it started. */ if (adapter->flags & USING_MSI) t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, CIDXINC_V(0) | SEINTARM_V(s->intrq.intr_params) | INGRESSQID_V(s->intrq.cntxt_id)); } /* * Wait until all NAPI handlers are descheduled. */ static void quiesce_rx(struct adapter *adapter) { struct sge *s = &adapter->sge; int rxq; for_each_ethrxq(s, rxq) napi_disable(&s->ethrxq[rxq].rspq.napi); napi_disable(&s->fw_evtq.napi); } /* * Response queue handler for the firmware event queue. */ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp, const struct pkt_gl *gl) { /* * Extract response opcode and get pointer to CPL message body. */ struct adapter *adapter = rspq->adapter; u8 opcode = ((const struct rss_header *)rsp)->opcode; void *cpl = (void *)(rsp + 1); switch (opcode) { case CPL_FW6_MSG: { /* * We've received an asynchronous message from the firmware. 
*/ const struct cpl_fw6_msg *fw_msg = cpl; if (fw_msg->type == FW6_TYPE_CMD_RPL) t4vf_handle_fw_rpl(adapter, fw_msg->data); break; } case CPL_FW4_MSG: { /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG. */ const struct cpl_sge_egr_update *p = (void *)(rsp + 3); opcode = CPL_OPCODE_G(ntohl(p->opcode_qid)); if (opcode != CPL_SGE_EGR_UPDATE) { dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" , opcode); break; } cpl = (void *)p; /*FALLTHROUGH*/ } case CPL_SGE_EGR_UPDATE: { /* * We've received an Egress Queue Status Update message. We * get these, if the SGE is configured to send these when the * firmware passes certain points in processing our TX * Ethernet Queue or if we make an explicit request for one. * We use these updates to determine when we may need to * restart a TX Ethernet Queue which was stopped for lack of * free TX Queue Descriptors ... */ const struct cpl_sge_egr_update *p = cpl; unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid)); struct sge *s = &adapter->sge; struct sge_txq *tq; struct sge_eth_txq *txq; unsigned int eq_idx; /* * Perform sanity checking on the Queue ID to make sure it * really refers to one of our TX Ethernet Egress Queues which * is active and matches the queue's ID. None of these error * conditions should ever happen so we may want to either make * them fatal and/or conditionalized under DEBUG. */ eq_idx = EQ_IDX(s, qid); if (unlikely(eq_idx >= MAX_EGRQ)) { dev_err(adapter->pdev_dev, "Egress Update QID %d out of range\n", qid); break; } tq = s->egr_map[eq_idx]; if (unlikely(tq == NULL)) { dev_err(adapter->pdev_dev, "Egress Update QID %d TXQ=NULL\n", qid); break; } txq = container_of(tq, struct sge_eth_txq, q); if (unlikely(tq->abs_id != qid)) { dev_err(adapter->pdev_dev, "Egress Update QID %d refers to TXQ %d\n", qid, tq->abs_id); break; } /* * Restart a stopped TX Queue which has less than half of its * TX ring in use ... */ txq->q.restarts++; netif_tx_wake_queue(txq->txq); break; } default: dev_err(adapter->pdev_dev, "unexpected CPL %#x on FW event queue\n", opcode); } return 0; } /* * Allocate SGE TX/RX response queues. Determine how many sets of SGE queues * to use and initializes them. We support multiple "Queue Sets" per port if * we have MSI-X, otherwise just one queue set per port. */ static int setup_sge_queues(struct adapter *adapter) { struct sge *s = &adapter->sge; int err, pidx, msix; /* * Clear "Queue Set" Free List Starving and TX Queue Mapping Error * state. */ bitmap_zero(s->starving_fl, MAX_EGRQ); /* * If we're using MSI interrupt mode we need to set up a "forwarded * interrupt" queue which we'll set up with our MSI vector. The rest * of the ingress queues will be set up to forward their interrupts to * this queue ... This must be first since t4vf_sge_alloc_rxq() uses * the intrq's queue ID as the interrupt forwarding queue for the * subsequent calls ... */ if (adapter->flags & USING_MSI) { err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false, adapter->port[0], 0, NULL, NULL); if (err) goto err_free_queues; } /* * Allocate our ingress queue for asynchronous firmware messages. */ err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0], MSIX_FW, NULL, fwevtq_handler); if (err) goto err_free_queues; /* * Allocate each "port"'s initial Queue Sets. These can be changed * later on ... up to the point where any interface on the adapter is * brought up at which point lots of things get nailed down * permanently ... 
*/ msix = MSIX_IQFLINT; for_each_port(adapter, pidx) { struct net_device *dev = adapter->port[pidx]; struct port_info *pi = netdev_priv(dev); struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset]; struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset]; int qs; for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false, dev, msix++, &rxq->fl, t4vf_ethrx_handler); if (err) goto err_free_queues; err = t4vf_sge_alloc_eth_txq(adapter, txq, dev, netdev_get_tx_queue(dev, qs), s->fw_evtq.cntxt_id); if (err) goto err_free_queues; rxq->rspq.idx = qs; memset(&rxq->stats, 0, sizeof(rxq->stats)); } } /* * Create the reverse mappings for the queues. */ s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id; s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id; IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq; for_each_port(adapter, pidx) { struct net_device *dev = adapter->port[pidx]; struct port_info *pi = netdev_priv(dev); struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset]; struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset]; int qs; for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq; EQ_MAP(s, txq->q.abs_id) = &txq->q; /* * The FW_IQ_CMD doesn't return the Absolute Queue IDs * for Free Lists but since all of the Egress Queues * (including Free Lists) have Relative Queue IDs * which are computed as Absolute - Base Queue ID, we * can synthesize the Absolute Queue IDs for the Free * Lists. This is useful for debugging purposes when * we want to dump Queue Contexts via the PF Driver. */ rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base; EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl; } } return 0; err_free_queues: t4vf_free_sge_resources(adapter); return err; } /* * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive * queues. We configure the RSS CPU lookup table to distribute to the number * of HW receive queues, and the response queue lookup table to narrow that * down to the response queues actually configured for each "port" (Virtual * Interface). We always configure the RSS mapping for all ports since the * mapping table has plenty of entries. */ static int setup_rss(struct adapter *adapter) { int pidx; for_each_port(adapter, pidx) { struct port_info *pi = adap2pinfo(adapter, pidx); struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset]; u16 rss[MAX_PORT_QSETS]; int qs, err; for (qs = 0; qs < pi->nqsets; qs++) rss[qs] = rxq[qs].rspq.abs_id; err = t4vf_config_rss_range(adapter, pi->viid, 0, pi->rss_size, rss, pi->nqsets); if (err) return err; /* * Perform Global RSS Mode-specific initialization. */ switch (adapter->params.rss.mode) { case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: /* * If Tunnel All Lookup isn't specified in the global * RSS Configuration, then we need to specify a * default Ingress Queue for any ingress packets which * aren't hashed. We'll use our first ingress queue * ... */ if (!adapter->params.rss.u.basicvirtual.tnlalllookup) { union rss_vi_config config; err = t4vf_read_rss_vi_config(adapter, pi->viid, &config); if (err) return err; config.basicvirtual.defaultq = rxq[0].rspq.abs_id; err = t4vf_write_rss_vi_config(adapter, pi->viid, &config); if (err) return err; } break; } } return 0; } /* * Bring the adapter up. Called whenever we go from no "ports" open to having * one open. This function performs the actions necessary to make an adapter * operational, such as completing the initialization of HW modules, and * enabling interrupts. 
Must be called with the rtnl lock held. (Note that * this is called "cxgb_up" in the PF Driver.) */ static int adapter_up(struct adapter *adapter) { int err; /* * If this is the first time we've been called, perform basic * adapter setup. Once we've done this, many of our adapter * parameters can no longer be changed ... */ if ((adapter->flags & FULL_INIT_DONE) == 0) { err = setup_sge_queues(adapter); if (err) return err; err = setup_rss(adapter); if (err) { t4vf_free_sge_resources(adapter); return err; } if (adapter->flags & USING_MSIX) name_msix_vecs(adapter); adapter->flags |= FULL_INIT_DONE; } /* * Acquire our interrupt resources. We only support MSI-X and MSI. */ BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0); if (adapter->flags & USING_MSIX) err = request_msix_queue_irqs(adapter); else err = request_irq(adapter->pdev->irq, t4vf_intr_handler(adapter), 0, adapter->name, adapter); if (err) { dev_err(adapter->pdev_dev, "request_irq failed, err %d\n", err); return err; } /* * Enable NAPI ingress processing and return success. */ enable_rx(adapter); t4vf_sge_start(adapter); /* Initialize hash mac addr list*/ INIT_LIST_HEAD(&adapter->mac_hlist); return 0; } /* * Bring the adapter down. Called whenever the last "port" (Virtual * Interface) closed. (Note that this routine is called "cxgb_down" in the PF * Driver.) */ static void adapter_down(struct adapter *adapter) { /* * Free interrupt resources. */ if (adapter->flags & USING_MSIX) free_msix_queue_irqs(adapter); else free_irq(adapter->pdev->irq, adapter); /* * Wait for NAPI handlers to finish. */ quiesce_rx(adapter); } /* * Start up a net device. */ static int cxgb4vf_open(struct net_device *dev) { int err; struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; /* * If this is the first interface that we're opening on the "adapter", * bring the "adapter" up now. */ if (adapter->open_device_map == 0) { err = adapter_up(adapter); if (err) return err; } /* * Note that this interface is up and start everything up ... */ err = link_start(dev); if (err) goto err_unwind; netif_tx_start_all_queues(dev); set_bit(pi->port_id, &adapter->open_device_map); return 0; err_unwind: if (adapter->open_device_map == 0) adapter_down(adapter); return err; } /* * Shut down a net device. This routine is called "cxgb_close" in the PF * Driver ... */ static int cxgb4vf_stop(struct net_device *dev) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; netif_tx_stop_all_queues(dev); netif_carrier_off(dev); t4vf_enable_vi(adapter, pi->viid, false, false); pi->link_cfg.link_ok = 0; clear_bit(pi->port_id, &adapter->open_device_map); if (adapter->open_device_map == 0) adapter_down(adapter); return 0; } /* * Translate our basic statistics into the standard "ifconfig" statistics. 
*/ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev) { struct t4vf_port_stats stats; struct port_info *pi = netdev2pinfo(dev); struct adapter *adapter = pi->adapter; struct net_device_stats *ns = &dev->stats; int err; spin_lock(&adapter->stats_lock); err = t4vf_get_port_stats(adapter, pi->pidx, &stats); spin_unlock(&adapter->stats_lock); memset(ns, 0, sizeof(*ns)); if (err) return ns; ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes + stats.tx_ucast_bytes + stats.tx_offload_bytes); ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames + stats.tx_ucast_frames + stats.tx_offload_frames); ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes + stats.rx_ucast_bytes); ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames + stats.rx_ucast_frames); ns->multicast = stats.rx_mcast_frames; ns->tx_errors = stats.tx_drop_frames; ns->rx_errors = stats.rx_err_frames; return ns; } static inline int cxgb4vf_set_addr_hash(struct port_info *pi) { struct adapter *adapter = pi->adapter; u64 vec = 0; bool ucast = false; struct hash_mac_addr *entry; /* Calculate the hash vector for the updated list and program it */ list_for_each_entry(entry, &adapter->mac_hlist, list) { ucast |= is_unicast_ether_addr(entry->addr); vec |= (1ULL << hash_mac_addr(entry->addr)); } return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false); } static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr) { struct port_info *pi = netdev_priv(netdev); struct adapter *adapter = pi->adapter; int ret; u64 mhash = 0; u64 uhash = 0; bool free = false; bool ucast = is_unicast_ether_addr(mac_addr); const u8 *maclist[1] = {mac_addr}; struct hash_mac_addr *new_entry; ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist, NULL, ucast ? &uhash : &mhash, false); if (ret < 0) goto out; /* if hash != 0, then add the addr to hash addr list * so on the end we will calculate the hash for the * list and program it */ if (uhash || mhash) { new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC); if (!new_entry) return -ENOMEM; ether_addr_copy(new_entry->addr, mac_addr); list_add_tail(&new_entry->list, &adapter->mac_hlist); ret = cxgb4vf_set_addr_hash(pi); } out: return ret < 0 ? ret : 0; } static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr) { struct port_info *pi = netdev_priv(netdev); struct adapter *adapter = pi->adapter; int ret; const u8 *maclist[1] = {mac_addr}; struct hash_mac_addr *entry, *tmp; /* If the MAC address to be removed is in the hash addr * list, delete it from the list and update hash vector */ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) { if (ether_addr_equal(entry->addr, mac_addr)) { list_del(&entry->list); kfree(entry); return cxgb4vf_set_addr_hash(pi); } } ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false); return ret < 0 ? -EINVAL : 0; } /* * Set RX properties of a port, such as promiscruity, address filters, and MTU. * If @mtu is -1 it is left unchanged. */ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) { struct port_info *pi = netdev_priv(dev); __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync); __dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync); return t4vf_set_rxmode(pi->adapter, pi->viid, -1, (dev->flags & IFF_PROMISC) != 0, (dev->flags & IFF_ALLMULTI) != 0, 1, -1, sleep_ok); } /* * Set the current receive modes on the device. 
*/ static void cxgb4vf_set_rxmode(struct net_device *dev) { /* unfortunately we can't return errors to the stack */ set_rxmode(dev, -1, false); } /* * Find the entry in the interrupt holdoff timer value array which comes * closest to the specified interrupt holdoff value. */ static int closest_timer(const struct sge *s, int us) { int i, timer_idx = 0, min_delta = INT_MAX; for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { int delta = us - s->timer_val[i]; if (delta < 0) delta = -delta; if (delta < min_delta) { min_delta = delta; timer_idx = i; } } return timer_idx; } static int closest_thres(const struct sge *s, int thres) { int i, delta, pktcnt_idx = 0, min_delta = INT_MAX; for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { delta = thres - s->counter_val[i]; if (delta < 0) delta = -delta; if (delta < min_delta) { min_delta = delta; pktcnt_idx = i; } } return pktcnt_idx; } /* * Return a queue's interrupt hold-off time in us. 0 means no timer. */ static unsigned int qtimer_val(const struct adapter *adapter, const struct sge_rspq *rspq) { unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params); return timer_idx < SGE_NTIMERS ? adapter->sge.timer_val[timer_idx] : 0; } /** * set_rxq_intr_params - set a queue's interrupt holdoff parameters * @adapter: the adapter * @rspq: the RX response queue * @us: the hold-off time in us, or 0 to disable timer * @cnt: the hold-off packet count, or 0 to disable counter * * Sets an RX response queue's interrupt hold-off time and packet count. * At least one of the two needs to be enabled for the queue to generate * interrupts. */ static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq, unsigned int us, unsigned int cnt) { unsigned int timer_idx; /* * If both the interrupt holdoff timer and count are specified as * zero, default to a holdoff count of 1 ... */ if ((us | cnt) == 0) cnt = 1; /* * If an interrupt holdoff count has been specified, then find the * closest configured holdoff count and use that. If the response * queue has already been created, then update its queue context * parameters ... */ if (cnt) { int err; u32 v, pktcnt_idx; pktcnt_idx = closest_thres(&adapter->sge, cnt); if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) { v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | FW_PARAMS_PARAM_X_V( FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id); err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx); if (err) return err; } rspq->pktcnt_idx = pktcnt_idx; } /* * Compute the closest holdoff timer index from the supplied holdoff * timer value. */ timer_idx = (us == 0 ? SGE_TIMER_RSTRT_CNTR : closest_timer(&adapter->sge, us)); /* * Update the response queue's interrupt coalescing parameters and * return success. */ rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) | QINTR_CNT_EN_V(cnt > 0)); return 0; } /* * Return a version number to identify the type of adapter. The scheme is: * - bits 0..9: chip version * - bits 10..15: chip revision */ static inline unsigned int mk_adap_vers(const struct adapter *adapter) { /* * Chip version 4, revision 0x3f (cxgb4vf). */ return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10); } /* * Execute the specified ioctl command. */ static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { int ret = 0; switch (cmd) { /* * The VF Driver doesn't have access to any of the other * common Ethernet device ioctl()'s (like reading/writing * PHY registers, etc. */ default: ret = -EOPNOTSUPP; break; } return ret; } /* * Change the device's MTU. 
*/ static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu) { int ret; struct port_info *pi = netdev_priv(dev); ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu, -1, -1, -1, -1, true); if (!ret) dev->mtu = new_mtu; return ret; } static netdev_features_t cxgb4vf_fix_features(struct net_device *dev, netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. */ if (features & NETIF_F_HW_VLAN_CTAG_RX) features |= NETIF_F_HW_VLAN_CTAG_TX; else features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } static int cxgb4vf_set_features(struct net_device *dev, netdev_features_t features) { struct port_info *pi = netdev_priv(dev); netdev_features_t changed = dev->features ^ features; if (changed & NETIF_F_HW_VLAN_CTAG_RX) t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1, features & NETIF_F_HW_VLAN_CTAG_TX, 0); return 0; } /* * Change the devices MAC address. */ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr) { int ret; struct sockaddr *addr = _addr; struct port_info *pi = netdev_priv(dev); if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt, addr->sa_data, true); if (ret < 0) return ret; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); pi->xact_addr_filt = ret; return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER /* * Poll all of our receive queues. This is called outside of normal interrupt * context. */ static void cxgb4vf_poll_controller(struct net_device *dev) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; if (adapter->flags & USING_MSIX) { struct sge_eth_rxq *rxq; int nqsets; rxq = &adapter->sge.ethrxq[pi->first_qset]; for (nqsets = pi->nqsets; nqsets; nqsets--) { t4vf_sge_intr_msix(0, &rxq->rspq); rxq++; } } else t4vf_intr_handler(adapter)(0, adapter); } #endif /* * Ethtool operations. * =================== * * Note that we don't support any ethtool operations which change the physical * state of the port to which we're linked. */ /** * from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool * @port_type: Firmware Port Type * @mod_type: Firmware Module Type * * Translate Firmware Port/Module type to Ethtool Port Type. */ static int from_fw_port_mod_type(enum fw_port_type port_type, enum fw_port_module_type mod_type) { if (port_type == FW_PORT_TYPE_BT_SGMII || port_type == FW_PORT_TYPE_BT_XFI || port_type == FW_PORT_TYPE_BT_XAUI) { return PORT_TP; } else if (port_type == FW_PORT_TYPE_FIBER_XFI || port_type == FW_PORT_TYPE_FIBER_XAUI) { return PORT_FIBRE; } else if (port_type == FW_PORT_TYPE_SFP || port_type == FW_PORT_TYPE_QSFP_10G || port_type == FW_PORT_TYPE_QSA || port_type == FW_PORT_TYPE_QSFP) { if (mod_type == FW_PORT_MOD_TYPE_LR || mod_type == FW_PORT_MOD_TYPE_SR || mod_type == FW_PORT_MOD_TYPE_ER || mod_type == FW_PORT_MOD_TYPE_LRM) return PORT_FIBRE; else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) return PORT_DA; else return PORT_OTHER; } return PORT_OTHER; } /** * fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask * @port_type: Firmware Port Type * @fw_caps: Firmware Port Capabilities * @link_mode_mask: ethtool Link Mode Mask * * Translate a Firmware Port Capabilities specification to an ethtool * Link Mode Mask. 
*/ static void fw_caps_to_lmm(enum fw_port_type port_type, unsigned int fw_caps, unsigned long *link_mode_mask) { #define SET_LMM(__lmm_name) __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name\ ## _BIT, link_mode_mask) #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \ do { \ if (fw_caps & FW_PORT_CAP_ ## __fw_name) \ SET_LMM(__lmm_name); \ } while (0) switch (port_type) { case FW_PORT_TYPE_BT_SGMII: case FW_PORT_TYPE_BT_XFI: case FW_PORT_TYPE_BT_XAUI: SET_LMM(TP); FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full); FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); break; case FW_PORT_TYPE_KX4: case FW_PORT_TYPE_KX: SET_LMM(Backplane); FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full); FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full); break; case FW_PORT_TYPE_KR: SET_LMM(Backplane); SET_LMM(10000baseKR_Full); break; case FW_PORT_TYPE_BP_AP: SET_LMM(Backplane); SET_LMM(10000baseR_FEC); SET_LMM(10000baseKR_Full); SET_LMM(1000baseKX_Full); break; case FW_PORT_TYPE_BP4_AP: SET_LMM(Backplane); SET_LMM(10000baseR_FEC); SET_LMM(10000baseKR_Full); SET_LMM(1000baseKX_Full); SET_LMM(10000baseKX4_Full); break; case FW_PORT_TYPE_FIBER_XFI: case FW_PORT_TYPE_FIBER_XAUI: case FW_PORT_TYPE_SFP: case FW_PORT_TYPE_QSFP_10G: case FW_PORT_TYPE_QSA: SET_LMM(FIBRE); FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); break; case FW_PORT_TYPE_BP40_BA: case FW_PORT_TYPE_QSFP: SET_LMM(FIBRE); SET_LMM(40000baseSR4_Full); break; case FW_PORT_TYPE_CR_QSFP: case FW_PORT_TYPE_SFP28: SET_LMM(FIBRE); SET_LMM(25000baseCR_Full); break; case FW_PORT_TYPE_KR4_100G: case FW_PORT_TYPE_CR4_QSFP: SET_LMM(FIBRE); SET_LMM(100000baseCR4_Full); break; default: break; } FW_CAPS_TO_LMM(ANEG, Autoneg); FW_CAPS_TO_LMM(802_3_PAUSE, Pause); FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause); #undef FW_CAPS_TO_LMM #undef SET_LMM } static int cxgb4vf_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *link_ksettings) { const struct port_info *pi = netdev_priv(dev); struct ethtool_link_settings *base = &link_ksettings->base; ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising); base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type); if (pi->mdio_addr >= 0) { base->phy_address = pi->mdio_addr; base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII ? ETH_MDIO_SUPPORTS_C22 : ETH_MDIO_SUPPORTS_C45); } else { base->phy_address = 255; base->mdio_support = 0; } fw_caps_to_lmm(pi->port_type, pi->link_cfg.supported, link_ksettings->link_modes.supported); fw_caps_to_lmm(pi->port_type, pi->link_cfg.advertising, link_ksettings->link_modes.advertising); fw_caps_to_lmm(pi->port_type, pi->link_cfg.lp_advertising, link_ksettings->link_modes.lp_advertising); if (netif_carrier_ok(dev)) { base->speed = pi->link_cfg.speed; base->duplex = DUPLEX_FULL; } else { base->speed = SPEED_UNKNOWN; base->duplex = DUPLEX_UNKNOWN; } base->autoneg = pi->link_cfg.autoneg; if (pi->link_cfg.supported & FW_PORT_CAP_ANEG) ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Autoneg); if (pi->link_cfg.autoneg) ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Autoneg); return 0; } /* * Return our driver information. 
*/ static void cxgb4vf_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { struct adapter *adapter = netdev2adap(dev); strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)), sizeof(drvinfo->bus_info)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%u.%u.%u.%u, TP %u.%u.%u.%u", FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev), FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev), FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev), FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev), FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev), FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev), FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev), FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev)); } /* * Return current adapter message level. */ static u32 cxgb4vf_get_msglevel(struct net_device *dev) { return netdev2adap(dev)->msg_enable; } /* * Set current adapter message level. */ static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel) { netdev2adap(dev)->msg_enable = msglevel; } /* * Return the device's current Queue Set ring size parameters along with the * allowed maximum values. Since ethtool doesn't understand the concept of * multi-queue devices, we just return the current values associated with the * first Queue Set. */ static void cxgb4vf_get_ringparam(struct net_device *dev, struct ethtool_ringparam *rp) { const struct port_info *pi = netdev_priv(dev); const struct sge *s = &pi->adapter->sge; rp->rx_max_pending = MAX_RX_BUFFERS; rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES; rp->rx_jumbo_max_pending = 0; rp->tx_max_pending = MAX_TXQ_ENTRIES; rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID; rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; rp->rx_jumbo_pending = 0; rp->tx_pending = s->ethtxq[pi->first_qset].q.size; } /* * Set the Queue Set ring size parameters for the device. Again, since * ethtool doesn't allow for the concept of multiple queues per device, we'll * apply these new values across all of the Queue Sets associated with the * device -- after vetting them of course! */ static int cxgb4vf_set_ringparam(struct net_device *dev, struct ethtool_ringparam *rp) { const struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; struct sge *s = &adapter->sge; int qs; if (rp->rx_pending > MAX_RX_BUFFERS || rp->rx_jumbo_pending || rp->tx_pending > MAX_TXQ_ENTRIES || rp->rx_mini_pending > MAX_RSPQ_ENTRIES || rp->rx_mini_pending < MIN_RSPQ_ENTRIES || rp->rx_pending < MIN_FL_ENTRIES || rp->tx_pending < MIN_TXQ_ENTRIES) return -EINVAL; if (adapter->flags & FULL_INIT_DONE) return -EBUSY; for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) { s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID; s->ethrxq[qs].rspq.size = rp->rx_mini_pending; s->ethtxq[qs].q.size = rp->tx_pending; } return 0; } /* * Return the interrupt holdoff timer and count for the first Queue Set on the * device. Our extension ioctl() (the cxgbtool interface) allows the * interrupt holdoff timer to be read on all of the device's Queue Sets. 
*/ static int cxgb4vf_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coalesce) { const struct port_info *pi = netdev_priv(dev); const struct adapter *adapter = pi->adapter; const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq; coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq); coalesce->rx_max_coalesced_frames = ((rspq->intr_params & QINTR_CNT_EN_F) ? adapter->sge.counter_val[rspq->pktcnt_idx] : 0); return 0; } /* * Set the RX interrupt holdoff timer and count for the first Queue Set on the * interface. Our extension ioctl() (the cxgbtool interface) allows us to set * the interrupt holdoff timer on any of the device's Queue Sets. */ static int cxgb4vf_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coalesce) { const struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; return set_rxq_intr_params(adapter, &adapter->sge.ethrxq[pi->first_qset].rspq, coalesce->rx_coalesce_usecs, coalesce->rx_max_coalesced_frames); } /* * Report current port link pause parameter settings. */ static void cxgb4vf_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pauseparam) { struct port_info *pi = netdev_priv(dev); pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0; pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0; } /* * Identify the port by blinking the port's LED. */ static int cxgb4vf_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) { unsigned int val; struct port_info *pi = netdev_priv(dev); if (state == ETHTOOL_ID_ACTIVE) val = 0xffff; else if (state == ETHTOOL_ID_INACTIVE) val = 0; else return -EINVAL; return t4vf_identify_port(pi->adapter, pi->viid, val); } /* * Port stats maintained per queue of the port. */ struct queue_port_stats { u64 tso; u64 tx_csum; u64 rx_csum; u64 vlan_ex; u64 vlan_ins; u64 lro_pkts; u64 lro_merged; }; /* * Strings for the ETH_SS_STATS statistics set ("ethtool -S"). Note that * these need to match the order of statistics returned by * t4vf_get_port_stats(). */ static const char stats_strings[][ETH_GSTRING_LEN] = { /* * These must match the layout of the t4vf_port_stats structure. */ "TxBroadcastBytes ", "TxBroadcastFrames ", "TxMulticastBytes ", "TxMulticastFrames ", "TxUnicastBytes ", "TxUnicastFrames ", "TxDroppedFrames ", "TxOffloadBytes ", "TxOffloadFrames ", "RxBroadcastBytes ", "RxBroadcastFrames ", "RxMulticastBytes ", "RxMulticastFrames ", "RxUnicastBytes ", "RxUnicastFrames ", "RxErrorFrames ", /* * These are accumulated per-queue statistics and must match the * order of the fields in the queue_port_stats structure. */ "TSO ", "TxCsumOffload ", "RxCsumGood ", "VLANextractions ", "VLANinsertions ", "GROPackets ", "GROMerged ", }; /* * Return the number of statistics in the specified statistics set. */ static int cxgb4vf_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(stats_strings); default: return -EOPNOTSUPP; } /*NOTREACHED*/ } /* * Return the strings for the specified statistics set. */ static void cxgb4vf_get_strings(struct net_device *dev, u32 sset, u8 *data) { switch (sset) { case ETH_SS_STATS: memcpy(data, stats_strings, sizeof(stats_strings)); break; } } /* * Small utility routine to accumulate queue statistics across the queues of * a "port". 
*/ static void collect_sge_port_stats(const struct adapter *adapter, const struct port_info *pi, struct queue_port_stats *stats) { const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset]; const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset]; int qs; memset(stats, 0, sizeof(*stats)); for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { stats->tso += txq->tso; stats->tx_csum += txq->tx_cso; stats->rx_csum += rxq->stats.rx_cso; stats->vlan_ex += rxq->stats.vlan_ex; stats->vlan_ins += txq->vlan_ins; stats->lro_pkts += rxq->stats.lro_pkts; stats->lro_merged += rxq->stats.lro_merged; } } /* * Return the ETH_SS_STATS statistics set. */ static void cxgb4vf_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct port_info *pi = netdev2pinfo(dev); struct adapter *adapter = pi->adapter; int err = t4vf_get_port_stats(adapter, pi->pidx, (struct t4vf_port_stats *)data); if (err) memset(data, 0, sizeof(struct t4vf_port_stats)); data += sizeof(struct t4vf_port_stats) / sizeof(u64); collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); } /* * Return the size of our register map. */ static int cxgb4vf_get_regs_len(struct net_device *dev) { return T4VF_REGMAP_SIZE; } /* * Dump a block of registers, start to end inclusive, into a buffer. */ static void reg_block_dump(struct adapter *adapter, void *regbuf, unsigned int start, unsigned int end) { u32 *bp = regbuf + start - T4VF_REGMAP_START; for ( ; start <= end; start += sizeof(u32)) { /* * Avoid reading the Mailbox Control register since that * can trigger a Mailbox Ownership Arbitration cycle and * interfere with communication with the firmware. */ if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL) *bp++ = 0xffff; else *bp++ = t4_read_reg(adapter, start); } } /* * Copy our entire register map into the provided buffer. */ static void cxgb4vf_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) { struct adapter *adapter = netdev2adap(dev); regs->version = mk_adap_vers(adapter); /* * Fill in register buffer with our register map. */ memset(regbuf, 0, T4VF_REGMAP_SIZE); reg_block_dump(adapter, regbuf, T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST, T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST); reg_block_dump(adapter, regbuf, T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST, T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST); /* T5 adds new registers in the PL Register map. */ reg_block_dump(adapter, regbuf, T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST, T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip) ? PL_VF_WHOAMI_A : PL_VF_REVISION_A)); reg_block_dump(adapter, regbuf, T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST, T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST); reg_block_dump(adapter, regbuf, T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST, T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST); } /* * Report current Wake On LAN settings. */ static void cxgb4vf_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { wol->supported = 0; wol->wolopts = 0; memset(&wol->sopass, 0, sizeof(wol->sopass)); } /* * TCP Segmentation Offload flags which we support. 
*/ #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) static const struct ethtool_ops cxgb4vf_ethtool_ops = { .get_link_ksettings = cxgb4vf_get_link_ksettings, .get_drvinfo = cxgb4vf_get_drvinfo, .get_msglevel = cxgb4vf_get_msglevel, .set_msglevel = cxgb4vf_set_msglevel, .get_ringparam = cxgb4vf_get_ringparam, .set_ringparam = cxgb4vf_set_ringparam, .get_coalesce = cxgb4vf_get_coalesce, .set_coalesce = cxgb4vf_set_coalesce, .get_pauseparam = cxgb4vf_get_pauseparam, .get_link = ethtool_op_get_link, .get_strings = cxgb4vf_get_strings, .set_phys_id = cxgb4vf_phys_id, .get_sset_count = cxgb4vf_get_sset_count, .get_ethtool_stats = cxgb4vf_get_ethtool_stats, .get_regs_len = cxgb4vf_get_regs_len, .get_regs = cxgb4vf_get_regs, .get_wol = cxgb4vf_get_wol, }; /* * /sys/kernel/debug/cxgb4vf support code and data. * ================================================ */ /* * Show Firmware Mailbox Command/Reply Log * * Note that we don't do any locking when dumping the Firmware Mailbox Log so * it's possible that we can catch things during a log update and therefore * see partially corrupted log entries. But i9t's probably Good Enough(tm). * If we ever decide that we want to make sure that we're dumping a coherent * log, we'd need to perform locking in the mailbox logging and in * mboxlog_open() where we'd need to grab the entire mailbox log in one go * like we do for the Firmware Device Log. But as stated above, meh ... */ static int mboxlog_show(struct seq_file *seq, void *v) { struct adapter *adapter = seq->private; struct mbox_cmd_log *log = adapter->mbox_log; struct mbox_cmd *entry; int entry_idx, i; if (v == SEQ_START_TOKEN) { seq_printf(seq, "%10s %15s %5s %5s %s\n", "Seq#", "Tstamp", "Atime", "Etime", "Command/Reply"); return 0; } entry_idx = log->cursor + ((uintptr_t)v - 2); if (entry_idx >= log->size) entry_idx -= log->size; entry = mbox_cmd_log_entry(log, entry_idx); /* skip over unused entries */ if (entry->timestamp == 0) return 0; seq_printf(seq, "%10u %15llu %5d %5d", entry->seqno, entry->timestamp, entry->access, entry->execute); for (i = 0; i < MBOX_LEN / 8; i++) { u64 flit = entry->cmd[i]; u32 hi = (u32)(flit >> 32); u32 lo = (u32)flit; seq_printf(seq, " %08x %08x", hi, lo); } seq_puts(seq, "\n"); return 0; } static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos) { struct adapter *adapter = seq->private; struct mbox_cmd_log *log = adapter->mbox_log; return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL); } static void *mboxlog_start(struct seq_file *seq, loff_t *pos) { return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN; } static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return mboxlog_get_idx(seq, *pos); } static void mboxlog_stop(struct seq_file *seq, void *v) { } static const struct seq_operations mboxlog_seq_ops = { .start = mboxlog_start, .next = mboxlog_next, .stop = mboxlog_stop, .show = mboxlog_show }; static int mboxlog_open(struct inode *inode, struct file *file) { int res = seq_open(file, &mboxlog_seq_ops); if (!res) { struct seq_file *seq = file->private_data; seq->private = inode->i_private; } return res; } static const struct file_operations mboxlog_fops = { .owner = THIS_MODULE, .open = mboxlog_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; /* * Show SGE Queue Set information. We display QPL Queues Sets per line. 
*/ #define QPL 4 static int sge_qinfo_show(struct seq_file *seq, void *v) { struct adapter *adapter = seq->private; int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL); int qs, r = (uintptr_t)v - 1; if (r) seq_putc(seq, '\n'); #define S3(fmt_spec, s, v) \ do {\ seq_printf(seq, "%-12s", s); \ for (qs = 0; qs < n; ++qs) \ seq_printf(seq, " %16" fmt_spec, v); \ seq_putc(seq, '\n'); \ } while (0) #define S(s, v) S3("s", s, v) #define T(s, v) S3("u", s, txq[qs].v) #define R(s, v) S3("u", s, rxq[qs].v) if (r < eth_entries) { const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL]; const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL]; int n = min(QPL, adapter->sge.ethqsets - QPL * r); S("QType:", "Ethernet"); S("Interface:", (rxq[qs].rspq.netdev ? rxq[qs].rspq.netdev->name : "N/A")); S3("d", "Port:", (rxq[qs].rspq.netdev ? ((struct port_info *) netdev_priv(rxq[qs].rspq.netdev))->port_id : -1)); T("TxQ ID:", q.abs_id); T("TxQ size:", q.size); T("TxQ inuse:", q.in_use); T("TxQ PIdx:", q.pidx); T("TxQ CIdx:", q.cidx); R("RspQ ID:", rspq.abs_id); R("RspQ size:", rspq.size); R("RspQE size:", rspq.iqe_len); S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq)); S3("u", "Intr pktcnt:", adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]); R("RspQ CIdx:", rspq.cidx); R("RspQ Gen:", rspq.gen); R("FL ID:", fl.abs_id); R("FL size:", fl.size - MIN_FL_RESID); R("FL avail:", fl.avail); R("FL PIdx:", fl.pidx); R("FL CIdx:", fl.cidx); return 0; } r -= eth_entries; if (r == 0) { const struct sge_rspq *evtq = &adapter->sge.fw_evtq; seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue"); seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id); seq_printf(seq, "%-12s %16u\n", "Intr delay:", qtimer_val(adapter, evtq)); seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:", adapter->sge.counter_val[evtq->pktcnt_idx]); seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx); seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen); } else if (r == 1) { const struct sge_rspq *intrq = &adapter->sge.intrq; seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue"); seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id); seq_printf(seq, "%-12s %16u\n", "Intr delay:", qtimer_val(adapter, intrq)); seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:", adapter->sge.counter_val[intrq->pktcnt_idx]); seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx); seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen); } #undef R #undef T #undef S #undef S3 return 0; } /* * Return the number of "entries" in our "file". We group the multi-Queue * sections with QPL Queue Sets per "entry". The sections of the output are: * * Ethernet RX/TX Queue Sets * Firmware Event Queue * Forwarded Interrupt Queue (if in MSI mode) */ static int sge_queue_entries(const struct adapter *adapter) { return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 + ((adapter->flags & USING_MSI) != 0); } static void *sge_queue_start(struct seq_file *seq, loff_t *pos) { int entries = sge_queue_entries(seq->private); return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; } static void sge_queue_stop(struct seq_file *seq, void *v) { } static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos) { int entries = sge_queue_entries(seq->private); ++*pos; return *pos < entries ? 
(void *)((uintptr_t)*pos + 1) : NULL; } static const struct seq_operations sge_qinfo_seq_ops = { .start = sge_queue_start, .next = sge_queue_next, .stop = sge_queue_stop, .show = sge_qinfo_show }; static int sge_qinfo_open(struct inode *inode, struct file *file) { int res = seq_open(file, &sge_qinfo_seq_ops); if (!res) { struct seq_file *seq = file->private_data; seq->private = inode->i_private; } return res; } static const struct file_operations sge_qinfo_debugfs_fops = { .owner = THIS_MODULE, .open = sge_qinfo_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; /* * Show SGE Queue Set statistics. We display QPL Queues Sets per line. */ #define QPL 4 static int sge_qstats_show(struct seq_file *seq, void *v) { struct adapter *adapter = seq->private; int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL); int qs, r = (uintptr_t)v - 1; if (r) seq_putc(seq, '\n'); #define S3(fmt, s, v) \ do { \ seq_printf(seq, "%-16s", s); \ for (qs = 0; qs < n; ++qs) \ seq_printf(seq, " %8" fmt, v); \ seq_putc(seq, '\n'); \ } while (0) #define S(s, v) S3("s", s, v) #define T3(fmt, s, v) S3(fmt, s, txq[qs].v) #define T(s, v) T3("lu", s, v) #define R3(fmt, s, v) S3(fmt, s, rxq[qs].v) #define R(s, v) R3("lu", s, v) if (r < eth_entries) { const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL]; const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL]; int n = min(QPL, adapter->sge.ethqsets - QPL * r); S("QType:", "Ethernet"); S("Interface:", (rxq[qs].rspq.netdev ? rxq[qs].rspq.netdev->name : "N/A")); R3("u", "RspQNullInts:", rspq.unhandled_irqs); R("RxPackets:", stats.pkts); R("RxCSO:", stats.rx_cso); R("VLANxtract:", stats.vlan_ex); R("LROmerged:", stats.lro_merged); R("LROpackets:", stats.lro_pkts); R("RxDrops:", stats.rx_drops); T("TSO:", tso); T("TxCSO:", tx_cso); T("VLANins:", vlan_ins); T("TxQFull:", q.stops); T("TxQRestarts:", q.restarts); T("TxMapErr:", mapping_err); R("FLAllocErr:", fl.alloc_failed); R("FLLrgAlcErr:", fl.large_alloc_failed); R("FLStarving:", fl.starving); return 0; } r -= eth_entries; if (r == 0) { const struct sge_rspq *evtq = &adapter->sge.fw_evtq; seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue"); seq_printf(seq, "%-16s %8u\n", "RspQNullInts:", evtq->unhandled_irqs); seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx); seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen); } else if (r == 1) { const struct sge_rspq *intrq = &adapter->sge.intrq; seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue"); seq_printf(seq, "%-16s %8u\n", "RspQNullInts:", intrq->unhandled_irqs); seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx); seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen); } #undef R #undef T #undef S #undef R3 #undef T3 #undef S3 return 0; } /* * Return the number of "entries" in our "file". We group the multi-Queue * sections with QPL Queue Sets per "entry". The sections of the output are: * * Ethernet RX/TX Queue Sets * Firmware Event Queue * Forwarded Interrupt Queue (if in MSI mode) */ static int sge_qstats_entries(const struct adapter *adapter) { return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 + ((adapter->flags & USING_MSI) != 0); } static void *sge_qstats_start(struct seq_file *seq, loff_t *pos) { int entries = sge_qstats_entries(seq->private); return *pos < entries ? 
(void *)((uintptr_t)*pos + 1) : NULL; } static void sge_qstats_stop(struct seq_file *seq, void *v) { } static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos) { int entries = sge_qstats_entries(seq->private); (*pos)++; return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; } static const struct seq_operations sge_qstats_seq_ops = { .start = sge_qstats_start, .next = sge_qstats_next, .stop = sge_qstats_stop, .show = sge_qstats_show }; static int sge_qstats_open(struct inode *inode, struct file *file) { int res = seq_open(file, &sge_qstats_seq_ops); if (res == 0) { struct seq_file *seq = file->private_data; seq->private = inode->i_private; } return res; } static const struct file_operations sge_qstats_proc_fops = { .owner = THIS_MODULE, .open = sge_qstats_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; /* * Show PCI-E SR-IOV Virtual Function Resource Limits. */ static int resources_show(struct seq_file *seq, void *v) { struct adapter *adapter = seq->private; struct vf_resources *vfres = &adapter->params.vfres; #define S(desc, fmt, var) \ seq_printf(seq, "%-60s " fmt "\n", \ desc " (" #var "):", vfres->var) S("Virtual Interfaces", "%d", nvi); S("Egress Queues", "%d", neq); S("Ethernet Control", "%d", nethctrl); S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint); S("Ingress Queues", "%d", niq); S("Traffic Class", "%d", tc); S("Port Access Rights Mask", "%#x", pmask); S("MAC Address Filters", "%d", nexactf); S("Firmware Command Read Capabilities", "%#x", r_caps); S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps); #undef S return 0; } static int resources_open(struct inode *inode, struct file *file) { return single_open(file, resources_show, inode->i_private); } static const struct file_operations resources_proc_fops = { .owner = THIS_MODULE, .open = resources_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* * Show Virtual Interfaces. */ static int interfaces_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) { seq_puts(seq, "Interface Port VIID\n"); } else { struct adapter *adapter = seq->private; int pidx = (uintptr_t)v - 2; struct net_device *dev = adapter->port[pidx]; struct port_info *pi = netdev_priv(dev); seq_printf(seq, "%9s %4d %#5x\n", dev->name, pi->port_id, pi->viid); } return 0; } static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos) { return pos <= adapter->params.nports ? (void *)(uintptr_t)(pos + 1) : NULL; } static void *interfaces_start(struct seq_file *seq, loff_t *pos) { return *pos ? interfaces_get_idx(seq->private, *pos) : SEQ_START_TOKEN; } static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos) { (*pos)++; return interfaces_get_idx(seq->private, *pos); } static void interfaces_stop(struct seq_file *seq, void *v) { } static const struct seq_operations interfaces_seq_ops = { .start = interfaces_start, .next = interfaces_next, .stop = interfaces_stop, .show = interfaces_show }; static int interfaces_open(struct inode *inode, struct file *file) { int res = seq_open(file, &interfaces_seq_ops); if (res == 0) { struct seq_file *seq = file->private_data; seq->private = inode->i_private; } return res; } static const struct file_operations interfaces_proc_fops = { .owner = THIS_MODULE, .open = interfaces_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; /* * /sys/kernel/debugfs/cxgb4vf/ files list. 
*/ struct cxgb4vf_debugfs_entry { const char *name; /* name of debugfs node */ umode_t mode; /* file system mode */ const struct file_operations *fops; }; static struct cxgb4vf_debugfs_entry debugfs_files[] = { { "mboxlog", S_IRUGO, &mboxlog_fops }, { "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops }, { "sge_qstats", S_IRUGO, &sge_qstats_proc_fops }, { "resources", S_IRUGO, &resources_proc_fops }, { "interfaces", S_IRUGO, &interfaces_proc_fops }, }; /* * Module and device initialization and cleanup code. * ================================================== */ /* * Set up out /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the * directory (debugfs_root) has already been set up. */ static int setup_debugfs(struct adapter *adapter) { int i; BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); /* * Debugfs support is best effort. */ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) (void)debugfs_create_file(debugfs_files[i].name, debugfs_files[i].mode, adapter->debugfs_root, (void *)adapter, debugfs_files[i].fops); return 0; } /* * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave * it to our caller to tear down the directory (debugfs_root). */ static void cleanup_debugfs(struct adapter *adapter) { BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); /* * Unlike our sister routine cleanup_proc(), we don't need to remove * individual entries because a call will be made to * debugfs_remove_recursive(). We just need to clean up any ancillary * persistent state. */ /* nothing to do */ } /* Figure out how many Ports and Queue Sets we can support. This depends on * knowing our Virtual Function Resources and may be called a second time if * we fall back from MSI-X to MSI Interrupt Mode. */ static void size_nports_qsets(struct adapter *adapter) { struct vf_resources *vfres = &adapter->params.vfres; unsigned int ethqsets, pmask_nports; /* The number of "ports" which we support is equal to the number of * Virtual Interfaces with which we've been provisioned. */ adapter->params.nports = vfres->nvi; if (adapter->params.nports > MAX_NPORTS) { dev_warn(adapter->pdev_dev, "only using %d of %d maximum" " allowed virtual interfaces\n", MAX_NPORTS, adapter->params.nports); adapter->params.nports = MAX_NPORTS; } /* We may have been provisioned with more VIs than the number of * ports we're allowed to access (our Port Access Rights Mask). * This is obviously a configuration conflict but we don't want to * crash the kernel or anything silly just because of that. */ pmask_nports = hweight32(adapter->params.vfres.pmask); if (pmask_nports < adapter->params.nports) { dev_warn(adapter->pdev_dev, "only using %d of %d provisioned" " virtual interfaces; limited by Port Access Rights" " mask %#x\n", pmask_nports, adapter->params.nports, adapter->params.vfres.pmask); adapter->params.nports = pmask_nports; } /* We need to reserve an Ingress Queue for the Asynchronous Firmware * Event Queue. And if we're using MSI Interrupts, we'll also need to * reserve an Ingress Queue for a Forwarded Interrupts. * * The rest of the FL/Intr-capable ingress queues will be matched up * one-for-one with Ethernet/Control egress queues in order to form * "Queue Sets" which will be aportioned between the "ports". For * each Queue Set, we'll need the ability to allocate two Egress * Contexts -- one for the Ingress Queue Free List and one for the TX * Ethernet Queue. 
* * Note that even if we're currently configured to use MSI-X * Interrupts (module variable msi == MSI_MSIX) we may get downgraded * to MSI Interrupts if we can't get enough MSI-X Interrupts. If that * happens we'll need to adjust things later. */ ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI); if (vfres->nethctrl != ethqsets) ethqsets = min(vfres->nethctrl, ethqsets); if (vfres->neq < ethqsets*2) ethqsets = vfres->neq/2; if (ethqsets > MAX_ETH_QSETS) ethqsets = MAX_ETH_QSETS; adapter->sge.max_ethqsets = ethqsets; if (adapter->sge.max_ethqsets < adapter->params.nports) { dev_warn(adapter->pdev_dev, "only using %d of %d available" " virtual interfaces (too few Queue Sets)\n", adapter->sge.max_ethqsets, adapter->params.nports); adapter->params.nports = adapter->sge.max_ethqsets; } } /* * Perform early "adapter" initialization. This is where we discover what * adapter parameters we're going to be using and initialize basic adapter * hardware support. */ static int adap_init0(struct adapter *adapter) { struct sge_params *sge_params = &adapter->params.sge; struct sge *s = &adapter->sge; int err; u32 param, val = 0; /* * Some environments do not properly handle PCIE FLRs -- e.g. in Linux * 2.6.31 and later we can't call pci_reset_function() in order to * issue an FLR because of a self- deadlock on the device semaphore. * Meanwhile, the OS infrastructure doesn't issue FLRs in all the * cases where they're needed -- for instance, some versions of KVM * fail to reset "Assigned Devices" when the VM reboots. Therefore we * use the firmware based reset in order to reset any per function * state. */ err = t4vf_fw_reset(adapter); if (err < 0) { dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err); return err; } /* * Grab basic operational parameters. These will predominantly have * been set up by the Physical Function Driver or will be hard coded * into the adapter. We just have to live with them ... Note that * we _must_ get our VPD parameters before our SGE parameters because * we need to know the adapter's core clock from the VPD in order to * properly decode the SGE Timer Values. */ err = t4vf_get_dev_params(adapter); if (err) { dev_err(adapter->pdev_dev, "unable to retrieve adapter" " device parameters: err=%d\n", err); return err; } err = t4vf_get_vpd_params(adapter); if (err) { dev_err(adapter->pdev_dev, "unable to retrieve adapter" " VPD parameters: err=%d\n", err); return err; } err = t4vf_get_sge_params(adapter); if (err) { dev_err(adapter->pdev_dev, "unable to retrieve adapter" " SGE parameters: err=%d\n", err); return err; } err = t4vf_get_rss_glb_config(adapter); if (err) { dev_err(adapter->pdev_dev, "unable to retrieve adapter" " RSS parameters: err=%d\n", err); return err; } if (adapter->params.rss.mode != FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { dev_err(adapter->pdev_dev, "unable to operate with global RSS" " mode %d\n", adapter->params.rss.mode); return -EINVAL; } err = t4vf_sge_init(adapter); if (err) { dev_err(adapter->pdev_dev, "unable to use adapter parameters:" " err=%d\n", err); return err; } /* If we're running on newer firmware, let it know that we're * prepared to deal with encapsulated CPL messages. Older * firmware won't understand this and we'll just get * unencapsulated messages ... */ param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP); val = 1; (void) t4vf_set_params(adapter, 1, &param, &val); /* * Retrieve our RX interrupt holdoff timer values and counter * threshold values from the SGE parameters. 
*/ s->timer_val[0] = core_ticks_to_us(adapter, TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1)); s->timer_val[1] = core_ticks_to_us(adapter, TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1)); s->timer_val[2] = core_ticks_to_us(adapter, TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3)); s->timer_val[3] = core_ticks_to_us(adapter, TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3)); s->timer_val[4] = core_ticks_to_us(adapter, TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5)); s->timer_val[5] = core_ticks_to_us(adapter, TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5)); s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold); s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold); s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold); s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold); /* * Grab our Virtual Interface resource allocation, extract the * features that we're interested in and do a bit of sanity testing on * what we discover. */ err = t4vf_get_vfres(adapter); if (err) { dev_err(adapter->pdev_dev, "unable to get virtual interface" " resources: err=%d\n", err); return err; } /* Check for various parameter sanity issues */ if (adapter->params.vfres.pmask == 0) { dev_err(adapter->pdev_dev, "no port access configured\n" "usable!\n"); return -EINVAL; } if (adapter->params.vfres.nvi == 0) { dev_err(adapter->pdev_dev, "no virtual interfaces configured/" "usable!\n"); return -EINVAL; } /* Initialize nports and max_ethqsets now that we have our Virtual * Function Resources. */ size_nports_qsets(adapter); return 0; } static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx, u8 pkt_cnt_idx, unsigned int size, unsigned int iqe_size) { rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) | (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN_F : 0)); rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0); rspq->iqe_len = iqe_size; rspq->size = size; } /* * Perform default configuration of DMA queues depending on the number and * type of ports we found and the number of available CPUs. Most settings can * be modified by the admin via ethtool and cxgbtool prior to the adapter * being brought up for the first time. */ static void cfg_queues(struct adapter *adapter) { struct sge *s = &adapter->sge; int q10g, n10g, qidx, pidx, qs; size_t iqe_size; /* * We should not be called till we know how many Queue Sets we can * support. In particular, this means that we need to know what kind * of interrupts we'll be using ... */ BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0); /* * Count the number of 10GbE Virtual Interfaces that we have. */ n10g = 0; for_each_port(adapter, pidx) n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg); /* * We default to 1 queue per non-10G port and up to # of cores queues * per 10G port. */ if (n10g == 0) q10g = 0; else { int n1g = (adapter->params.nports - n10g); q10g = (adapter->sge.max_ethqsets - n1g) / n10g; if (q10g > num_online_cpus()) q10g = num_online_cpus(); } /* * Allocate the "Queue Sets" to the various Virtual Interfaces. * The layout will be established in setup_sge_queues() when the * adapter is brough up for the first time. */ qidx = 0; for_each_port(adapter, pidx) { struct port_info *pi = adap2pinfo(adapter, pidx); pi->first_qset = qidx; pi->nqsets = is_x_10g_port(&pi->link_cfg) ? 
q10g : 1; qidx += pi->nqsets; } s->ethqsets = qidx; /* * The Ingress Queue Entry Size for our various Response Queues needs * to be big enough to accommodate the largest message we can receive * from the chip/firmware; which is 64 bytes ... */ iqe_size = 64; /* * Set up default Queue Set parameters ... Start off with the * shortest interrupt holdoff timer. */ for (qs = 0; qs < s->max_ethqsets; qs++) { struct sge_eth_rxq *rxq = &s->ethrxq[qs]; struct sge_eth_txq *txq = &s->ethtxq[qs]; init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size); rxq->fl.size = 72; txq->q.size = 1024; } /* * The firmware event queue is used for link state changes and * notifications of TX DMA completions. */ init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size); /* * The forwarded interrupt queue is used when we're in MSI interrupt * mode. In this mode all interrupts associated with RX queues will * be forwarded to a single queue which we'll associate with our MSI * interrupt vector. The messages dropped in the forwarded interrupt * queue will indicate which ingress queue needs servicing ... This * queue needs to be large enough to accommodate all of the ingress * queues which are forwarding their interrupt (+1 to prevent the PIDX * from equalling the CIDX if every ingress queue has an outstanding * interrupt). The queue doesn't need to be any larger because no * ingress queue will ever have more than one outstanding interrupt at * any time ... */ init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1, iqe_size); } /* * Reduce the number of Ethernet queues across all ports to at most n. * n provides at least one queue per port. */ static void reduce_ethqs(struct adapter *adapter, int n) { int i; struct port_info *pi; /* * While we have too many active Ether Queue Sets, interate across the * "ports" and reduce their individual Queue Set allocations. */ BUG_ON(n < adapter->params.nports); while (n < adapter->sge.ethqsets) for_each_port(adapter, i) { pi = adap2pinfo(adapter, i); if (pi->nqsets > 1) { pi->nqsets--; adapter->sge.ethqsets--; if (adapter->sge.ethqsets <= n) break; } } /* * Reassign the starting Queue Sets for each of the "ports" ... */ n = 0; for_each_port(adapter, i) { pi = adap2pinfo(adapter, i); pi->first_qset = n; n += pi->nqsets; } } /* * We need to grab enough MSI-X vectors to cover our interrupt needs. Ideally * we get a separate MSI-X vector for every "Queue Set" plus any extras we * need. Minimally we need one for every Virtual Interface plus those needed * for our "extras". Note that this process may lower the maximum number of * allowed Queue Sets ... */ static int enable_msix(struct adapter *adapter) { int i, want, need, nqsets; struct msix_entry entries[MSIX_ENTRIES]; struct sge *s = &adapter->sge; for (i = 0; i < MSIX_ENTRIES; ++i) entries[i].entry = i; /* * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets" * plus those needed for our "extras" (for example, the firmware * message queue). We _need_ at least one "Queue Set" per Virtual * Interface plus those needed for our "extras". So now we get to see * if the song is right ... 
*/ want = s->max_ethqsets + MSIX_EXTRAS; need = adapter->params.nports + MSIX_EXTRAS; want = pci_enable_msix_range(adapter->pdev, entries, need, want); if (want < 0) return want; nqsets = want - MSIX_EXTRAS; if (nqsets < s->max_ethqsets) { dev_warn(adapter->pdev_dev, "only enough MSI-X vectors" " for %d Queue Sets\n", nqsets); s->max_ethqsets = nqsets; if (nqsets < s->ethqsets) reduce_ethqs(adapter, nqsets); } for (i = 0; i < want; ++i) adapter->msix_info[i].vec = entries[i].vector; return 0; } static const struct net_device_ops cxgb4vf_netdev_ops = { .ndo_open = cxgb4vf_open, .ndo_stop = cxgb4vf_stop, .ndo_start_xmit = t4vf_eth_xmit, .ndo_get_stats = cxgb4vf_get_stats, .ndo_set_rx_mode = cxgb4vf_set_rxmode, .ndo_set_mac_address = cxgb4vf_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = cxgb4vf_do_ioctl, .ndo_change_mtu = cxgb4vf_change_mtu, .ndo_fix_features = cxgb4vf_fix_features, .ndo_set_features = cxgb4vf_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cxgb4vf_poll_controller, #endif }; /* * "Probe" a device: initialize a device and construct all kernel and driver * state needed to manage the device. This routine is called "init_one" in * the PF Driver ... */ static int cxgb4vf_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int pci_using_dac; int err, pidx; unsigned int pmask; struct adapter *adapter; struct port_info *pi; struct net_device *netdev; unsigned int pf; /* * Print our driver banner the first time we're called to initialize a * device. */ pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION); /* * Initialize generic PCI device state. */ err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "cannot enable PCI device\n"); return err; } /* * Reserve PCI resources for the device. If we can't get them some * other driver may have already claimed the device ... */ err = pci_request_regions(pdev, KBUILD_MODNAME); if (err) { dev_err(&pdev->dev, "cannot obtain PCI resources\n"); goto err_disable_device; } /* * Set up our DMA mask: try for 64-bit address masking first and * fall back to 32-bit if we can't get 64 bits ... */ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (err == 0) { err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "unable to obtain 64-bit DMA for" " coherent allocations\n"); goto err_release_regions; } pci_using_dac = 1; } else { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err != 0) { dev_err(&pdev->dev, "no usable DMA configuration\n"); goto err_release_regions; } pci_using_dac = 0; } /* * Enable bus mastering for the device ... */ pci_set_master(pdev); /* * Allocate our adapter data structure and attach it to the device. */ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); if (!adapter) { err = -ENOMEM; goto err_release_regions; } pci_set_drvdata(pdev, adapter); adapter->pdev = pdev; adapter->pdev_dev = &pdev->dev; adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + (sizeof(struct mbox_cmd) * T4VF_OS_LOG_MBOX_CMDS), GFP_KERNEL); if (!adapter->mbox_log) { err = -ENOMEM; goto err_free_adapter; } adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS; /* * Initialize SMP data synchronization resources. */ spin_lock_init(&adapter->stats_lock); spin_lock_init(&adapter->mbox_lock); INIT_LIST_HEAD(&adapter->mlist.list); /* * Map our I/O registers in BAR0. 
*/ adapter->regs = pci_ioremap_bar(pdev, 0); if (!adapter->regs) { dev_err(&pdev->dev, "cannot map device registers\n"); err = -ENOMEM; goto err_free_adapter; } /* Wait for the device to become ready before proceeding ... */ err = t4vf_prep_adapter(adapter); if (err) { dev_err(adapter->pdev_dev, "device didn't become ready:" " err=%d\n", err); goto err_unmap_bar0; } /* For T5 and later we want to use the new BAR-based User Doorbells, * so we need to map BAR2 here ... */ if (!is_t4(adapter->params.chip)) { adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2)); if (!adapter->bar2) { dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n"); err = -ENOMEM; goto err_unmap_bar0; } } /* * Initialize adapter level features. */ adapter->name = pci_name(pdev); adapter->msg_enable = DFLT_MSG_ENABLE; err = adap_init0(adapter); if (err) goto err_unmap_bar; /* * Allocate our "adapter ports" and stitch everything together. */ pmask = adapter->params.vfres.pmask; pf = t4vf_get_pf_from_vf(adapter); for_each_port(adapter, pidx) { int port_id, viid; u8 mac[ETH_ALEN]; unsigned int naddr = 1; /* * We simplistically allocate our virtual interfaces * sequentially across the port numbers to which we have * access rights. This should be configurable in some manner * ... */ if (pmask == 0) break; port_id = ffs(pmask) - 1; pmask &= ~(1 << port_id); viid = t4vf_alloc_vi(adapter, port_id); if (viid < 0) { dev_err(&pdev->dev, "cannot allocate VI for port %d:" " err=%d\n", port_id, viid); err = viid; goto err_free_dev; } /* * Allocate our network device and stitch things together. */ netdev = alloc_etherdev_mq(sizeof(struct port_info), MAX_PORT_QSETS); if (netdev == NULL) { t4vf_free_vi(adapter, viid); err = -ENOMEM; goto err_free_dev; } adapter->port[pidx] = netdev; SET_NETDEV_DEV(netdev, &pdev->dev); pi = netdev_priv(netdev); pi->adapter = adapter; pi->pidx = pidx; pi->port_id = port_id; pi->viid = viid; /* * Initialize the starting state of our "port" and register * it. */ pi->xact_addr_filt = -1; netif_carrier_off(netdev); netdev->irq = pdev->irq; netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM; netdev->vlan_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA; netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->min_mtu = 81; netdev->max_mtu = ETH_MAX_MTU; netdev->netdev_ops = &cxgb4vf_netdev_ops; netdev->ethtool_ops = &cxgb4vf_ethtool_ops; netdev->dev_port = pi->port_id; /* * Initialize the hardware/software state for the port. */ err = t4vf_port_init(adapter, pidx); if (err) { dev_err(&pdev->dev, "cannot initialize port %d\n", pidx); goto err_free_dev; } err = t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac); if (err) { dev_err(&pdev->dev, "unable to determine MAC ACL address, " "continuing anyway.. (status %d)\n", err); } else if (naddr && adapter->params.vfres.nvi == 1) { struct sockaddr addr; ether_addr_copy(addr.sa_data, mac); err = cxgb4vf_set_mac_addr(netdev, &addr); if (err) { dev_err(&pdev->dev, "unable to set MAC address %pM\n", mac); goto err_free_dev; } dev_info(&pdev->dev, "Using assigned MAC ACL: %pM\n", mac); } } /* See what interrupts we'll be using. If we've been configured to * use MSI-X interrupts, try to enable them but fall back to using * MSI interrupts if we can't enable MSI-X interrupts. 
If we can't * get MSI interrupts we bail with the error. */ if (msi == MSI_MSIX && enable_msix(adapter) == 0) adapter->flags |= USING_MSIX; else { if (msi == MSI_MSIX) { dev_info(adapter->pdev_dev, "Unable to use MSI-X Interrupts; falling " "back to MSI Interrupts\n"); /* We're going to need a Forwarded Interrupt Queue so * that may cut into how many Queue Sets we can * support. */ msi = MSI_MSI; size_nports_qsets(adapter); } err = pci_enable_msi(pdev); if (err) { dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;" " err=%d\n", err); goto err_free_dev; } adapter->flags |= USING_MSI; } /* Now that we know how many "ports" we have and what interrupt * mechanism we're going to use, we can configure our queue resources. */ cfg_queues(adapter); /* * The "card" is now ready to go. If any errors occur during device * registration we do not fail the whole "card" but rather proceed * only with the ports we manage to register successfully. However we * must register at least one net device. */ for_each_port(adapter, pidx) { struct port_info *pi = netdev_priv(adapter->port[pidx]); netdev = adapter->port[pidx]; if (netdev == NULL) continue; netif_set_real_num_tx_queues(netdev, pi->nqsets); netif_set_real_num_rx_queues(netdev, pi->nqsets); err = register_netdev(netdev); if (err) { dev_warn(&pdev->dev, "cannot register net device %s," " skipping\n", netdev->name); continue; } set_bit(pidx, &adapter->registered_device_map); } if (adapter->registered_device_map == 0) { dev_err(&pdev->dev, "could not register any net devices\n"); goto err_disable_interrupts; } /* * Set up our debugfs entries. */ if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) { adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), cxgb4vf_debugfs_root); if (IS_ERR_OR_NULL(adapter->debugfs_root)) dev_warn(&pdev->dev, "could not create debugfs" " directory"); else setup_debugfs(adapter); } /* * Print a short notice on the existence and configuration of the new * VF network device ... */ for_each_port(adapter, pidx) { dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n", adapter->port[pidx]->name, (adapter->flags & USING_MSIX) ? "MSI-X" : (adapter->flags & USING_MSI) ? "MSI" : ""); } /* * Return success! */ return 0; /* * Error recovery and exit code. Unwind state that's been created * so far and return the error. */ err_disable_interrupts: if (adapter->flags & USING_MSIX) { pci_disable_msix(adapter->pdev); adapter->flags &= ~USING_MSIX; } else if (adapter->flags & USING_MSI) { pci_disable_msi(adapter->pdev); adapter->flags &= ~USING_MSI; } err_free_dev: for_each_port(adapter, pidx) { netdev = adapter->port[pidx]; if (netdev == NULL) continue; pi = netdev_priv(netdev); t4vf_free_vi(adapter, pi->viid); if (test_bit(pidx, &adapter->registered_device_map)) unregister_netdev(netdev); free_netdev(netdev); } err_unmap_bar: if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); err_unmap_bar0: iounmap(adapter->regs); err_free_adapter: kfree(adapter->mbox_log); kfree(adapter); err_release_regions: pci_release_regions(pdev); pci_clear_master(pdev); err_disable_device: pci_disable_device(pdev); return err; } /* * "Remove" a device: tear down all kernel and driver state created in the * "probe" routine and quiesce the device (disable interrupts, etc.). (Note * that this is called "remove_one" in the PF Driver.) */ static void cxgb4vf_pci_remove(struct pci_dev *pdev) { struct adapter *adapter = pci_get_drvdata(pdev); /* * Tear down driver state associated with device. */ if (adapter) { int pidx; /* * Stop all of our activity. 
Unregister network port, * disable interrupts, etc. */ for_each_port(adapter, pidx) if (test_bit(pidx, &adapter->registered_device_map)) unregister_netdev(adapter->port[pidx]); t4vf_sge_stop(adapter); if (adapter->flags & USING_MSIX) { pci_disable_msix(adapter->pdev); adapter->flags &= ~USING_MSIX; } else if (adapter->flags & USING_MSI) { pci_disable_msi(adapter->pdev); adapter->flags &= ~USING_MSI; } /* * Tear down our debugfs entries. */ if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { cleanup_debugfs(adapter); debugfs_remove_recursive(adapter->debugfs_root); } /* * Free all of the various resources which we've acquired ... */ t4vf_free_sge_resources(adapter); for_each_port(adapter, pidx) { struct net_device *netdev = adapter->port[pidx]; struct port_info *pi; if (netdev == NULL) continue; pi = netdev_priv(netdev); t4vf_free_vi(adapter, pi->viid); free_netdev(netdev); } iounmap(adapter->regs); if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); kfree(adapter->mbox_log); kfree(adapter); } /* * Disable the device and release its PCI resources. */ pci_disable_device(pdev); pci_clear_master(pdev); pci_release_regions(pdev); } /* * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt * delivery. */ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev) { struct adapter *adapter; int pidx; adapter = pci_get_drvdata(pdev); if (!adapter) return; /* Disable all Virtual Interfaces. This will shut down the * delivery of all ingress packets into the chip for these * Virtual Interfaces. */ for_each_port(adapter, pidx) if (test_bit(pidx, &adapter->registered_device_map)) unregister_netdev(adapter->port[pidx]); /* Free up all Queues which will prevent further DMA and * Interrupts allowing various internal pathways to drain. */ t4vf_sge_stop(adapter); if (adapter->flags & USING_MSIX) { pci_disable_msix(adapter->pdev); adapter->flags &= ~USING_MSIX; } else if (adapter->flags & USING_MSI) { pci_disable_msi(adapter->pdev); adapter->flags &= ~USING_MSI; } /* * Free up all Queues which will prevent further DMA and * Interrupts allowing various internal pathways to drain. */ t4vf_free_sge_resources(adapter); pci_set_drvdata(pdev, NULL); } /* Macros needed to support the PCI Device ID Table ... */ #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ static const struct pci_device_id cxgb4vf_pci_tbl[] = { #define CH_PCI_DEVICE_ID_FUNCTION 0x8 #define CH_PCI_ID_TABLE_ENTRY(devid) \ { PCI_VDEVICE(CHELSIO, (devid)), 0 } #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } } #include "../cxgb4/t4_pci_id_tbl.h" MODULE_DESCRIPTION(DRV_DESC); MODULE_AUTHOR("Chelsio Communications"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl); static struct pci_driver cxgb4vf_driver = { .name = KBUILD_MODNAME, .id_table = cxgb4vf_pci_tbl, .probe = cxgb4vf_pci_probe, .remove = cxgb4vf_pci_remove, .shutdown = cxgb4vf_pci_shutdown, }; /* * Initialize global driver state. */ static int __init cxgb4vf_module_init(void) { int ret; /* * Vet our module parameters. 
*/ if (msi != MSI_MSIX && msi != MSI_MSI) { pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX, MSI_MSI); return -EINVAL; } /* Debugfs support is optional, just warn if this fails */ cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) pr_warn("could not create debugfs entry, continuing\n"); ret = pci_register_driver(&cxgb4vf_driver); if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) debugfs_remove(cxgb4vf_debugfs_root); return ret; } /* * Tear down global driver state. */ static void __exit cxgb4vf_module_exit(void) { pci_unregister_driver(&cxgb4vf_driver); debugfs_remove(cxgb4vf_debugfs_root); } module_init(cxgb4vf_module_init); module_exit(cxgb4vf_module_exit);
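/*
 * Worked example (illustrative only -- the resource numbers are hypothetical,
 * not taken from any particular adapter): suppose the VF is provisioned with
 * niqflint = 32, nethctrl = 32 and neq = 64 and we are using MSI-X
 * Interrupts.  size_nports_qsets() then computes
 *
 *	ethqsets = niqflint - 1 - 0   = 31	(one Ingress Queue reserved for
 *						 the FW Event Queue; no Forwarded
 *						 Interrupt Queue under MSI-X)
 *	ethqsets = min(nethctrl, 31)  = 31
 *	neq (64) >= 2 * 31, so the Egress Context limit doesn't bite
 *
 * giving max_ethqsets = 31 (subject to the MAX_ETH_QSETS cap).  enable_msix()
 * then asks for max_ethqsets + MSIX_EXTRAS vectors but will settle for as few
 * as nports + MSIX_EXTRAS, trimming max_ethqsets via reduce_ethqs() if it is
 * granted fewer vectors than it wanted.
 */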
null
null
null
null
93,172
2,272
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
167,267
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* RxRPC individual remote procedure call handling * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/slab.h> #include <linux/module.h> #include <linux/circ_buf.h> #include <linux/spinlock_types.h> #include <net/sock.h> #include <net/af_rxrpc.h> #include "ar-internal.h" const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = { [RXRPC_CALL_UNINITIALISED] = "Uninit ", [RXRPC_CALL_CLIENT_AWAIT_CONN] = "ClWtConn", [RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq", [RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl", [RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl", [RXRPC_CALL_SERVER_PREALLOC] = "SvPrealc", [RXRPC_CALL_SERVER_SECURING] = "SvSecure", [RXRPC_CALL_SERVER_ACCEPTING] = "SvAccept", [RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq", [RXRPC_CALL_SERVER_ACK_REQUEST] = "SvAckReq", [RXRPC_CALL_SERVER_SEND_REPLY] = "SvSndRpl", [RXRPC_CALL_SERVER_AWAIT_ACK] = "SvAwtACK", [RXRPC_CALL_COMPLETE] = "Complete", }; const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = { [RXRPC_CALL_SUCCEEDED] = "Complete", [RXRPC_CALL_REMOTELY_ABORTED] = "RmtAbort", [RXRPC_CALL_LOCALLY_ABORTED] = "LocAbort", [RXRPC_CALL_LOCAL_ERROR] = "LocError", [RXRPC_CALL_NETWORK_ERROR] = "NetError", }; struct kmem_cache *rxrpc_call_jar; LIST_HEAD(rxrpc_calls); DEFINE_RWLOCK(rxrpc_call_lock); static void rxrpc_call_timer_expired(unsigned long _call) { struct rxrpc_call *call = (struct rxrpc_call *)_call; _enter("%d", call->debug_id); if (call->state < RXRPC_CALL_COMPLETE) rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real()); } /* * find an extant server call * - called in process context with IRQs enabled */ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx, unsigned long user_call_ID) { struct rxrpc_call *call; struct rb_node *p; _enter("%p,%lx", rx, user_call_ID); read_lock(&rx->call_lock); p = rx->calls.rb_node; while (p) { call = rb_entry(p, struct rxrpc_call, sock_node); if (user_call_ID < call->user_call_ID) p = p->rb_left; else if (user_call_ID > call->user_call_ID) p = p->rb_right; else goto found_extant_call; } read_unlock(&rx->call_lock); _leave(" = NULL"); return NULL; found_extant_call: rxrpc_get_call(call, rxrpc_call_got); read_unlock(&rx->call_lock); _leave(" = %p [%d]", call, atomic_read(&call->usage)); return call; } /* * allocate a new call */ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) { struct rxrpc_call *call; call = kmem_cache_zalloc(rxrpc_call_jar, gfp); if (!call) return NULL; call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(struct sk_buff *), gfp); if (!call->rxtx_buffer) goto nomem; call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp); if (!call->rxtx_annotations) goto nomem_2; mutex_init(&call->user_mutex); setup_timer(&call->timer, rxrpc_call_timer_expired, (unsigned long)call); INIT_WORK(&call->processor, &rxrpc_process_call); INIT_LIST_HEAD(&call->link); INIT_LIST_HEAD(&call->chan_wait_link); INIT_LIST_HEAD(&call->accept_link); INIT_LIST_HEAD(&call->recvmsg_link); INIT_LIST_HEAD(&call->sock_link); init_waitqueue_head(&call->waitq); spin_lock_init(&call->lock); rwlock_init(&call->state_lock); atomic_set(&call->usage, 1); call->debug_id = atomic_inc_return(&rxrpc_debug_id); 
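	/* Poison the not-yet-linked sock_node so that any accidental use of
	 * the rbtree linkage before the call is published under the socket
	 * (rb_link_node()/rb_insert_color() in rxrpc_new_client_call()) stands
	 * out when debugging; release and cleanup later overwrite it with
	 * their own 0xdd/0xcd poison patterns.
	 */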
memset(&call->sock_node, 0xed, sizeof(call->sock_node)); /* Leave space in the ring to handle a maxed-out jumbo packet */ call->rx_winsize = rxrpc_rx_window_size; call->tx_winsize = 16; call->rx_expect_next = 1; if (RXRPC_TX_SMSS > 2190) call->cong_cwnd = 2; else if (RXRPC_TX_SMSS > 1095) call->cong_cwnd = 3; else call->cong_cwnd = 4; call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1; return call; nomem_2: kfree(call->rxtx_buffer); nomem: kmem_cache_free(rxrpc_call_jar, call); return NULL; } /* * Allocate a new client call. */ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx, gfp_t gfp) { struct rxrpc_call *call; ktime_t now; _enter(""); call = rxrpc_alloc_call(gfp); if (!call) return ERR_PTR(-ENOMEM); call->state = RXRPC_CALL_CLIENT_AWAIT_CONN; call->service_id = srx->srx_service; call->tx_phase = true; now = ktime_get_real(); call->acks_latest_ts = now; call->cong_tstamp = now; _leave(" = %p", call); return call; } /* * Initiate the call ack/resend/expiry timer. */ static void rxrpc_start_call_timer(struct rxrpc_call *call) { ktime_t now = ktime_get_real(), expire_at; expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime); call->expire_at = expire_at; call->ack_at = expire_at; call->ping_at = expire_at; call->resend_at = expire_at; call->timer.expires = jiffies + LONG_MAX / 2; rxrpc_set_timer(call, rxrpc_timer_begin, now); } /* * Set up a call for the given parameters. * - Called with the socket lock held, which it must release. * - If it returns a call, the call's lock will need releasing by the caller. */ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, struct rxrpc_conn_parameters *cp, struct sockaddr_rxrpc *srx, unsigned long user_call_ID, gfp_t gfp) __releases(&rx->sk.sk_lock.slock) { struct rxrpc_call *call, *xcall; struct rb_node *parent, **pp; const void *here = __builtin_return_address(0); int ret; _enter("%p,%lx", rx, user_call_ID); call = rxrpc_alloc_client_call(srx, gfp); if (IS_ERR(call)) { release_sock(&rx->sk); _leave(" = %ld", PTR_ERR(call)); return call; } trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage), here, (const void *)user_call_ID); /* We need to protect a partially set up call against the user as we * will be acting outside the socket lock. */ mutex_lock(&call->user_mutex); /* Publish the call, even though it is incompletely set up as yet */ write_lock(&rx->call_lock); pp = &rx->calls.rb_node; parent = NULL; while (*pp) { parent = *pp; xcall = rb_entry(parent, struct rxrpc_call, sock_node); if (user_call_ID < xcall->user_call_ID) pp = &(*pp)->rb_left; else if (user_call_ID > xcall->user_call_ID) pp = &(*pp)->rb_right; else goto error_dup_user_ID; } rcu_assign_pointer(call->socket, rx); call->user_call_ID = user_call_ID; __set_bit(RXRPC_CALL_HAS_USERID, &call->flags); rxrpc_get_call(call, rxrpc_call_got_userid); rb_link_node(&call->sock_node, parent, pp); rb_insert_color(&call->sock_node, &rx->calls); list_add(&call->sock_link, &rx->sock_calls); write_unlock(&rx->call_lock); write_lock(&rxrpc_call_lock); list_add_tail(&call->link, &rxrpc_calls); write_unlock(&rxrpc_call_lock); /* From this point on, the call is protected by its own lock. */ release_sock(&rx->sk); /* Set up or get a connection record and set the protocol parameters, * including channel number and call ID. 
*/ ret = rxrpc_connect_call(call, cp, srx, gfp); if (ret < 0) goto error; trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage), here, NULL); spin_lock_bh(&call->conn->params.peer->lock); hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets); spin_unlock_bh(&call->conn->params.peer->lock); rxrpc_start_call_timer(call); _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id); _leave(" = %p [new]", call); return call; /* We unexpectedly found the user ID in the list after taking * the call_lock. This shouldn't happen unless the user races * with itself and tries to add the same user ID twice at the * same time in different threads. */ error_dup_user_ID: write_unlock(&rx->call_lock); release_sock(&rx->sk); ret = -EEXIST; error: __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, RX_CALL_DEAD, ret); trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage), here, ERR_PTR(ret)); rxrpc_release_call(rx, call); mutex_unlock(&call->user_mutex); rxrpc_put_call(call, rxrpc_call_put); _leave(" = %d", ret); return ERR_PTR(ret); } /* * Set up an incoming call. call->conn points to the connection. * This is called in BH context and isn't allowed to fail. */ void rxrpc_incoming_call(struct rxrpc_sock *rx, struct rxrpc_call *call, struct sk_buff *skb) { struct rxrpc_connection *conn = call->conn; struct rxrpc_skb_priv *sp = rxrpc_skb(skb); u32 chan; _enter(",%d", call->conn->debug_id); rcu_assign_pointer(call->socket, rx); call->call_id = sp->hdr.callNumber; call->service_id = sp->hdr.serviceId; call->cid = sp->hdr.cid; call->state = RXRPC_CALL_SERVER_ACCEPTING; if (sp->hdr.securityIndex > 0) call->state = RXRPC_CALL_SERVER_SECURING; call->cong_tstamp = skb->tstamp; /* Set the channel for this call. We don't get channel_lock as we're * only defending against the data_ready handler (which we're called * from) and the RESPONSE packet parser (which is only really * interested in call_counter and can cope with a disagreement with the * call pointer). */ chan = sp->hdr.cid & RXRPC_CHANNELMASK; conn->channels[chan].call_counter = call->call_id; conn->channels[chan].call_id = call->call_id; rcu_assign_pointer(conn->channels[chan].call, call); spin_lock(&conn->params.peer->lock); hlist_add_head(&call->error_link, &conn->params.peer->error_targets); spin_unlock(&conn->params.peer->lock); _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id); rxrpc_start_call_timer(call); _leave(""); } /* * Queue a call's work processor, getting a ref to pass to the work queue. */ bool rxrpc_queue_call(struct rxrpc_call *call) { const void *here = __builtin_return_address(0); int n = __atomic_add_unless(&call->usage, 1, 0); if (n == 0) return false; if (rxrpc_queue_work(&call->processor)) trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL); else rxrpc_put_call(call, rxrpc_call_put_noqueue); return true; } /* * Queue a call's work processor, passing the callers ref to the work queue. */ bool __rxrpc_queue_call(struct rxrpc_call *call) { const void *here = __builtin_return_address(0); int n = atomic_read(&call->usage); ASSERTCMP(n, >=, 1); if (rxrpc_queue_work(&call->processor)) trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL); else rxrpc_put_call(call, rxrpc_call_put_noqueue); return true; } /* * Note the re-emergence of a call. 
*/ void rxrpc_see_call(struct rxrpc_call *call) { const void *here = __builtin_return_address(0); if (call) { int n = atomic_read(&call->usage); trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL); } } /* * Note the addition of a ref on a call. */ void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op) { const void *here = __builtin_return_address(0); int n = atomic_inc_return(&call->usage); trace_rxrpc_call(call, op, n, here, NULL); } /* * Detach a call from its owning socket. */ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) { const void *here = __builtin_return_address(0); struct rxrpc_connection *conn = call->conn; bool put = false; int i; _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage)); trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage), here, (const void *)call->flags); ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); spin_lock_bh(&call->lock); if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags)) BUG(); spin_unlock_bh(&call->lock); del_timer_sync(&call->timer); /* Make sure we don't get any more notifications */ write_lock_bh(&rx->recvmsg_lock); if (!list_empty(&call->recvmsg_link)) { _debug("unlinking once-pending call %p { e=%lx f=%lx }", call, call->events, call->flags); list_del(&call->recvmsg_link); put = true; } /* list_empty() must return false in rxrpc_notify_socket() */ call->recvmsg_link.next = NULL; call->recvmsg_link.prev = NULL; write_unlock_bh(&rx->recvmsg_lock); if (put) rxrpc_put_call(call, rxrpc_call_put); write_lock(&rx->call_lock); if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) { rb_erase(&call->sock_node, &rx->calls); memset(&call->sock_node, 0xdd, sizeof(call->sock_node)); rxrpc_put_call(call, rxrpc_call_put_userid); } list_del(&call->sock_link); write_unlock(&rx->call_lock); _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn); if (conn) rxrpc_disconnect_call(call); for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) { rxrpc_free_skb(call->rxtx_buffer[i], (call->tx_phase ? rxrpc_skb_tx_cleaned : rxrpc_skb_rx_cleaned)); call->rxtx_buffer[i] = NULL; } _leave(""); } /* * release all the calls associated with a socket */ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx) { struct rxrpc_call *call; _enter("%p", rx); while (!list_empty(&rx->to_be_accepted)) { call = list_entry(rx->to_be_accepted.next, struct rxrpc_call, accept_link); list_del(&call->accept_link); rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, ECONNRESET); rxrpc_put_call(call, rxrpc_call_put); } while (!list_empty(&rx->sock_calls)) { call = list_entry(rx->sock_calls.next, struct rxrpc_call, sock_link); rxrpc_get_call(call, rxrpc_call_got); rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, ECONNRESET); rxrpc_send_abort_packet(call); rxrpc_release_call(rx, call); rxrpc_put_call(call, rxrpc_call_put); } _leave(""); } /* * release a call */ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op) { const void *here = __builtin_return_address(0); int n; ASSERT(call != NULL); n = atomic_dec_return(&call->usage); trace_rxrpc_call(call, op, n, here, NULL); ASSERTCMP(n, >=, 0); if (n == 0) { _debug("call %d dead", call->debug_id); ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); write_lock(&rxrpc_call_lock); list_del_init(&call->link); write_unlock(&rxrpc_call_lock); rxrpc_cleanup_call(call); } } /* * Final call destruction under RCU. 
*/ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu) { struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu); rxrpc_put_peer(call->peer); kfree(call->rxtx_buffer); kfree(call->rxtx_annotations); kmem_cache_free(rxrpc_call_jar, call); } /* * clean up a call */ void rxrpc_cleanup_call(struct rxrpc_call *call) { int i; _net("DESTROY CALL %d", call->debug_id); memset(&call->sock_node, 0xcd, sizeof(call->sock_node)); del_timer_sync(&call->timer); ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); ASSERTCMP(call->conn, ==, NULL); /* Clean up the Rx/Tx buffer */ for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) rxrpc_free_skb(call->rxtx_buffer[i], (call->tx_phase ? rxrpc_skb_tx_cleaned : rxrpc_skb_rx_cleaned)); rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned); call_rcu(&call->rcu, rxrpc_rcu_destroy_call); } /* * Make sure that all calls are gone. */ void __exit rxrpc_destroy_all_calls(void) { struct rxrpc_call *call; _enter(""); if (list_empty(&rxrpc_calls)) return; write_lock(&rxrpc_call_lock); while (!list_empty(&rxrpc_calls)) { call = list_entry(rxrpc_calls.next, struct rxrpc_call, link); _debug("Zapping call %p", call); rxrpc_see_call(call); list_del_init(&call->link); pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n", call, atomic_read(&call->usage), rxrpc_call_states[call->state], call->flags, call->events); write_unlock(&rxrpc_call_lock); cond_resched(); write_lock(&rxrpc_call_lock); } write_unlock(&rxrpc_call_lock); }
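/*
 * Teardown ordering, for reference: rxrpc_put_call() drops the final ref,
 * rxrpc_cleanup_call() kills the timer and frees the Rx/Tx ring skbs, and
 * the actual kfree()s are deferred to rxrpc_rcu_destroy_call() via
 * call_rcu().  The intervening RCU grace period is what allows lookups done
 * under rcu_read_lock() -- for instance through the connection channel
 * pointers set with rcu_assign_pointer() above -- to keep dereferencing the
 * call structure safely until they leave their read-side critical sections.
 */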
null
null
null
null
75,615
752
null
train_val
c536b6be1a72aefd632d5530106a67c516cb9f4b
257,139
openssl
0
https://github.com/openssl/openssl
2016-09-22 23:12:38+01:00
/* * Copyright 2008-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include "internal/cryptlib.h" #include <openssl/asn1t.h> #include <openssl/pem.h> #include <openssl/x509v3.h> #include <openssl/err.h> #include <openssl/cms.h> #include <openssl/bio.h> #include <openssl/comp.h> #include "cms_lcl.h" #ifdef ZLIB /* CMS CompressedData Utilities */ CMS_ContentInfo *cms_CompressedData_create(int comp_nid) { CMS_ContentInfo *cms; CMS_CompressedData *cd; /* * Will need something cleverer if there is ever more than one * compression algorithm or parameters have some meaning... */ if (comp_nid != NID_zlib_compression) { CMSerr(CMS_F_CMS_COMPRESSEDDATA_CREATE, CMS_R_UNSUPPORTED_COMPRESSION_ALGORITHM); return NULL; } cms = CMS_ContentInfo_new(); if (cms == NULL) return NULL; cd = M_ASN1_new_of(CMS_CompressedData); if (cd == NULL) goto err; cms->contentType = OBJ_nid2obj(NID_id_smime_ct_compressedData); cms->d.compressedData = cd; cd->version = 0; X509_ALGOR_set0(cd->compressionAlgorithm, OBJ_nid2obj(NID_zlib_compression), V_ASN1_UNDEF, NULL); cd->encapContentInfo->eContentType = OBJ_nid2obj(NID_pkcs7_data); return cms; err: CMS_ContentInfo_free(cms); return NULL; } BIO *cms_CompressedData_init_bio(CMS_ContentInfo *cms) { CMS_CompressedData *cd; const ASN1_OBJECT *compoid; if (OBJ_obj2nid(cms->contentType) != NID_id_smime_ct_compressedData) { CMSerr(CMS_F_CMS_COMPRESSEDDATA_INIT_BIO, CMS_R_CONTENT_TYPE_NOT_COMPRESSED_DATA); return NULL; } cd = cms->d.compressedData; X509_ALGOR_get0(&compoid, NULL, NULL, cd->compressionAlgorithm); if (OBJ_obj2nid(compoid) != NID_zlib_compression) { CMSerr(CMS_F_CMS_COMPRESSEDDATA_INIT_BIO, CMS_R_UNSUPPORTED_COMPRESSION_ALGORITHM); return NULL; } return BIO_new(BIO_f_zlib()); } #endif
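/*
 * Usage sketch (illustrative only; error handling is omitted and the file
 * names are made up): applications normally reach the helpers above through
 * the public CMS API rather than calling cms_CompressedData_create()
 * directly, e.g.
 *
 *	BIO *in  = BIO_new_file("data.bin", "rb");
 *	BIO *out = BIO_new_file("data.cms", "w");
 *	CMS_ContentInfo *cms = CMS_compress(in, NID_zlib_compression,
 *					    CMS_BINARY | CMS_STREAM);
 *	if (cms != NULL)
 *		SMIME_write_CMS(out, cms, in, CMS_BINARY | CMS_STREAM);
 *	CMS_ContentInfo_free(cms);
 *	BIO_free(in);
 *	BIO_free(out);
 *
 * Note that this path is only available when OpenSSL is built with zlib
 * support (the ZLIB guard above); without it CMS_compress() simply fails.
 */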
null
null
null
null
118,584
63,124
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
63,124
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "base/strings/utf_string_conversions.h" #include "base/test/scoped_feature_list.h" #include "build/build_config.h" #include "chrome/browser/ui/views/harmony/chrome_layout_provider.h" #include "chrome/browser/ui/views/harmony/chrome_typography.h" #include "chrome/browser/ui/views/harmony/harmony_typography_provider.h" #include "testing/gtest/include/gtest/gtest.h" #include "ui/base/default_style.h" #include "ui/base/resource/resource_bundle.h" #include "ui/base/ui_base_features.h" #include "ui/gfx/font_list.h" #include "ui/views/controls/label.h" #include "ui/views/controls/styled_label.h" #include "ui/views/style/typography.h" #include "ui/views/style/typography_provider.h" #if defined(OS_MACOSX) #include "base/mac/mac_util.h" #endif #if defined(OS_WIN) #include "base/win/windows_version.h" #include "ui/gfx/win/direct_write.h" #endif namespace { // Constant from the Harmony spec. constexpr int kHarmonyTitleSize = 15; } // namespace class LayoutProviderTest : public testing::Test { public: LayoutProviderTest() {} #if defined(OS_WIN) protected: static void SetUpTestCase() { // The expected case is to have DirectWrite enabled; the fallback gives // different font heights. However, only use DirectWrite on Windows 10 and // later, since it's known to have flaky results on Windows 7. See // http://crbug.com/759870. if (base::win::GetVersion() >= base::win::VERSION_WIN10) gfx::win::MaybeInitializeDirectWrite(); } #endif private: DISALLOW_COPY_AND_ASSIGN(LayoutProviderTest); }; // Check legacy font sizes. No new code should be using these constants, but if // these tests ever fail it probably means something in the old UI will have // changed by mistake. // Disabled since this relies on machine configuration. http://crbug.com/701241. TEST_F(LayoutProviderTest, DISABLED_LegacyFontSizeConstants) { ui::ResourceBundle& rb = ui::ResourceBundle::GetSharedInstance(); gfx::FontList label_font = rb.GetFontListWithDelta(ui::kLabelFontSizeDelta); EXPECT_EQ(12, label_font.GetFontSize()); EXPECT_EQ(15, label_font.GetHeight()); EXPECT_EQ(12, label_font.GetBaseline()); EXPECT_EQ(9, label_font.GetCapHeight()); // Note some Windows bots report 11,13,11,9 for the above. // TODO(tapted): Smoke them out and figure out why. #if defined(OS_MACOSX) if (base::mac::IsOS10_9()) { EXPECT_EQ(6, label_font.GetExpectedTextWidth(1)); } else { EXPECT_EQ(10, label_font.GetExpectedTextWidth(1)); } #else EXPECT_EQ(6, label_font.GetExpectedTextWidth(1)); // Some Windows bots may say 5. 
#endif gfx::FontList title_font = rb.GetFontListWithDelta(ui::kTitleFontSizeDelta); #if defined(OS_WIN) EXPECT_EQ(15, title_font.GetFontSize()); EXPECT_EQ(20, title_font.GetHeight()); EXPECT_EQ(17, title_font.GetBaseline()); EXPECT_EQ(11, title_font.GetCapHeight()); #elif defined(OS_MACOSX) EXPECT_EQ(14, title_font.GetFontSize()); EXPECT_EQ(17, title_font.GetHeight()); EXPECT_EQ(14, title_font.GetBaseline()); if (base::mac::IsOS10_9()) { EXPECT_EQ(11, title_font.GetCapHeight()); } else { EXPECT_EQ(10, title_font.GetCapHeight()); } #else EXPECT_EQ(15, title_font.GetFontSize()); EXPECT_EQ(18, title_font.GetHeight()); EXPECT_EQ(14, title_font.GetBaseline()); EXPECT_EQ(11, title_font.GetCapHeight()); #endif #if defined(OS_MACOSX) if (base::mac::IsOS10_9()) { EXPECT_EQ(7, title_font.GetExpectedTextWidth(1)); } else { EXPECT_EQ(12, title_font.GetExpectedTextWidth(1)); } #else EXPECT_EQ(8, title_font.GetExpectedTextWidth(1)); #endif gfx::FontList small_font = rb.GetFontList(ui::ResourceBundle::SmallFont); gfx::FontList base_font = rb.GetFontList(ui::ResourceBundle::BaseFont); gfx::FontList bold_font = rb.GetFontList(ui::ResourceBundle::BoldFont); gfx::FontList medium_font = rb.GetFontList(ui::ResourceBundle::MediumFont); gfx::FontList medium_bold_font = rb.GetFontList(ui::ResourceBundle::MediumBoldFont); gfx::FontList large_font = rb.GetFontList(ui::ResourceBundle::LargeFont); #if defined(OS_MACOSX) EXPECT_EQ(12, small_font.GetFontSize()); EXPECT_EQ(13, base_font.GetFontSize()); EXPECT_EQ(13, bold_font.GetFontSize()); EXPECT_EQ(16, medium_font.GetFontSize()); EXPECT_EQ(16, medium_bold_font.GetFontSize()); EXPECT_EQ(21, large_font.GetFontSize()); #else EXPECT_EQ(11, small_font.GetFontSize()); EXPECT_EQ(12, base_font.GetFontSize()); EXPECT_EQ(12, bold_font.GetFontSize()); EXPECT_EQ(15, medium_font.GetFontSize()); EXPECT_EQ(15, medium_bold_font.GetFontSize()); EXPECT_EQ(20, large_font.GetFontSize()); #endif } // Check that asking for fonts of a given size match the Harmony spec. If these // tests fail, the Harmony TypographyProvider needs to be updated to handle the // new font properties. For example, when title_font.GetHeight() returns 19, the // Harmony TypographyProvider adds 3 to obtain its target height of 22. If a // platform starts returning 18 in a standard configuration then the // TypographyProvider must add 4 instead. We do this so that Chrome adapts // correctly to _non-standard_ system font configurations on user machines. // Disabled since this relies on machine configuration. http://crbug.com/701241. TEST_F(LayoutProviderTest, DISABLED_RequestFontBySize) { #if defined(OS_MACOSX) constexpr int kBase = 13; #else constexpr int kBase = 12; #endif // Harmony spec. constexpr int kHeadline = 20; constexpr int kTitle = kHarmonyTitleSize; // Leading 22. constexpr int kBody1 = 13; // Leading 20. constexpr int kBody2 = 12; // Leading 20. 
constexpr int kButton = 12; #if defined(OS_WIN) constexpr gfx::Font::Weight kButtonWeight = gfx::Font::Weight::BOLD; #else constexpr gfx::Font::Weight kButtonWeight = gfx::Font::Weight::MEDIUM; #endif ui::ResourceBundle& rb = ui::ResourceBundle::GetSharedInstance(); gfx::FontList headline_font = rb.GetFontListWithDelta(kHeadline - kBase); gfx::FontList title_font = rb.GetFontListWithDelta(kTitle - kBase); gfx::FontList body1_font = rb.GetFontListWithDelta(kBody1 - kBase); gfx::FontList body2_font = rb.GetFontListWithDelta(kBody2 - kBase); gfx::FontList button_font = rb.GetFontListWithDelta( kButton - kBase, gfx::Font::NORMAL, kButtonWeight); // The following checks on leading don't need to match the spec. Instead, it // means Label::SetLineHeight() needs to be used to increase it. But what we // are really interested in is the delta between GetFontSize() and GetHeight() // since that (plus a fixed constant) determines how the leading should change // when a larger font is configured in the OS. EXPECT_EQ(kHeadline, headline_font.GetFontSize()); // Headline leading not specified (multiline should be rare). #if defined(OS_MACOSX) EXPECT_EQ(25, headline_font.GetHeight()); #elif defined(OS_WIN) EXPECT_EQ(HarmonyTypographyProvider::GetPlatformFontHeight(CONTEXT_HEADLINE), headline_font.GetHeight()); #else EXPECT_EQ(24, headline_font.GetHeight()); #endif EXPECT_EQ(kTitle, title_font.GetFontSize()); // Title font leading should be 22. #if defined(OS_MACOSX) EXPECT_EQ(19, title_font.GetHeight()); // i.e. Add 3 to obtain line height. #elif defined(OS_WIN) EXPECT_EQ(20, title_font.GetHeight()); // Add 2. #else EXPECT_EQ(18, title_font.GetHeight()); // Add 4. #endif EXPECT_EQ(kBody1, body1_font.GetFontSize()); // Body1 font leading should be 20. #if defined(OS_MACOSX) EXPECT_EQ(16, body1_font.GetHeight()); // Add 4. #elif defined(OS_WIN) EXPECT_EQ( HarmonyTypographyProvider::GetPlatformFontHeight(CONTEXT_BODY_TEXT_LARGE), body1_font.GetHeight()); #else // Linux. EXPECT_EQ(17, body1_font.GetHeight()); // Add 3. #endif EXPECT_EQ(kBody2, body2_font.GetFontSize()); // Body2 font leading should be 20. #if defined(OS_WIN) EXPECT_EQ( HarmonyTypographyProvider::GetPlatformFontHeight(CONTEXT_BODY_TEXT_SMALL), body2_font.GetHeight()); #else EXPECT_EQ(15, body2_font.GetHeight()); // Other platforms: Add 5. #endif EXPECT_EQ(kButton, button_font.GetFontSize()); // Button leading not specified (shouldn't be needed: no multiline buttons). #if defined(OS_WIN) EXPECT_EQ( HarmonyTypographyProvider::GetPlatformFontHeight(CONTEXT_BODY_TEXT_SMALL), button_font.GetHeight()); #else EXPECT_EQ(15, button_font.GetHeight()); #endif } // Test that the default TypographyProvider correctly maps TextContexts relative // to the "base" font in the manner that legacy toolkit-views code expects. This // reads the base font configuration at runtime, and only tests font sizes, so // should be robust against platform changes. TEST_F(LayoutProviderTest, FontSizeRelativeToBase) { using views::style::GetFont; constexpr int kStyle = views::style::STYLE_PRIMARY; // Typography described in chrome_typography.h requires a // ChromeLayoutProvider. ChromeLayoutProvider layout_provider; // Legacy code measures everything relative to a default-constructed FontList. // On Mac, subtract one since that is 13pt instead of 12pt. 
#if defined(OS_MACOSX) const int twelve = gfx::FontList().GetFontSize() - 1; #else const int twelve = gfx::FontList().GetFontSize(); #endif EXPECT_EQ(twelve, GetFont(CONTEXT_BODY_TEXT_SMALL, kStyle).GetFontSize()); EXPECT_EQ(twelve, GetFont(views::style::CONTEXT_LABEL, kStyle).GetFontSize()); EXPECT_EQ(twelve, GetFont(views::style::CONTEXT_TEXTFIELD, kStyle).GetFontSize()); EXPECT_EQ(twelve, GetFont(views::style::CONTEXT_BUTTON, kStyle).GetFontSize()); #if defined(OS_MACOSX) // We never exposed UI on Mac using these constants so it doesn't matter that // they are different. They only need to match under Harmony. EXPECT_EQ(twelve + 9, GetFont(CONTEXT_HEADLINE, kStyle).GetFontSize()); EXPECT_EQ(twelve + 2, GetFont(views::style::CONTEXT_DIALOG_TITLE, kStyle).GetFontSize()); EXPECT_EQ(twelve + 2, GetFont(CONTEXT_BODY_TEXT_LARGE, kStyle).GetFontSize()); EXPECT_EQ(twelve, GetFont(CONTEXT_DEPRECATED_SMALL, kStyle).GetFontSize()); #else // E.g. Headline should give a 20pt font. EXPECT_EQ(twelve + 8, GetFont(CONTEXT_HEADLINE, kStyle).GetFontSize()); // Titles should be 15pt. Etc. EXPECT_EQ(twelve + 3, GetFont(views::style::CONTEXT_DIALOG_TITLE, kStyle).GetFontSize()); EXPECT_EQ(twelve + 1, GetFont(CONTEXT_BODY_TEXT_LARGE, kStyle).GetFontSize()); EXPECT_EQ(twelve - 1, GetFont(CONTEXT_DEPRECATED_SMALL, kStyle).GetFontSize()); #endif } // Ensure that line height can be overridden by Chrome's TypographyProvider for // for the standard set of styles. This varies by platform and test machine // configuration. Generally, for a particular platform configuration, there // should be a consistent increase in line height when compared to the height of // a given font. TEST_F(LayoutProviderTest, TypographyLineHeight) { constexpr int kStyle = views::style::STYLE_PRIMARY; // Only MD overrides the default line spacing. base::test::ScopedFeatureList scoped_feature_list; scoped_feature_list.InitAndEnableFeature(features::kSecondaryUiMd); std::unique_ptr<views::LayoutProvider> layout_provider = ChromeLayoutProvider::CreateLayoutProvider(); constexpr struct { int context; int min; int max; } kExpectedIncreases[] = {{CONTEXT_HEADLINE, 4, 8}, {views::style::CONTEXT_DIALOG_TITLE, 1, 4}, {CONTEXT_BODY_TEXT_LARGE, 2, 4}, {CONTEXT_BODY_TEXT_SMALL, 4, 5}}; for (size_t i = 0; i < arraysize(kExpectedIncreases); ++i) { SCOPED_TRACE(testing::Message() << "Testing index: " << i); const auto& increase = kExpectedIncreases[i]; const gfx::FontList& font = views::style::GetFont(increase.context, kStyle); int line_spacing = views::style::GetLineHeight(increase.context, kStyle); EXPECT_GE(increase.max, line_spacing - font.GetHeight()); EXPECT_LE(increase.min, line_spacing - font.GetHeight()); } // Buttons should specify zero line height (i.e. use the font's height) so // buttons have flexibility to configure their own spacing. EXPECT_EQ(0, views::style::GetLineHeight(views::style::CONTEXT_BUTTON, kStyle)); EXPECT_EQ( 0, views::style::GetLineHeight(views::style::CONTEXT_BUTTON_MD, kStyle)); } // Ensure that line heights reported in a default bot configuration match the // Harmony spec. This test will only run if it detects that the current machine // has the default OS configuration. 
TEST_F(LayoutProviderTest, ExplicitTypographyLineHeight) { base::test::ScopedFeatureList scoped_feature_list; scoped_feature_list.InitAndEnableFeature(features::kSecondaryUiMd); std::unique_ptr<views::LayoutProvider> layout_provider = ChromeLayoutProvider::CreateLayoutProvider(); constexpr int kStyle = views::style::STYLE_PRIMARY; if (views::style::GetFont(views::style::CONTEXT_DIALOG_TITLE, kStyle) .GetFontSize() != kHarmonyTitleSize) { LOG(WARNING) << "Skipping: Test machine not in default configuration."; return; } // Line heights from the Harmony spec. constexpr int kBodyLineHeight = 20; constexpr struct { int context; int line_height; } kHarmonyHeights[] = {{CONTEXT_HEADLINE, 32}, {views::style::CONTEXT_DIALOG_TITLE, 22}, {CONTEXT_BODY_TEXT_LARGE, kBodyLineHeight}, {CONTEXT_BODY_TEXT_SMALL, kBodyLineHeight}}; for (size_t i = 0; i < arraysize(kHarmonyHeights); ++i) { SCOPED_TRACE(testing::Message() << "Testing index: " << i); EXPECT_EQ(kHarmonyHeights[i].line_height, views::style::GetLineHeight(kHarmonyHeights[i].context, kStyle)); views::Label label(base::ASCIIToUTF16("test"), kHarmonyHeights[i].context); label.SizeToPreferredSize(); EXPECT_EQ(kHarmonyHeights[i].line_height, label.height()); } // TODO(tapted): Pass in contexts to StyledLabel instead. Currently they are // stuck on style::CONTEXT_LABEL. That only matches the default line height in // HarmonyTypographyProvider::GetLineHeight(), which is body text. EXPECT_EQ(kBodyLineHeight, views::style::GetLineHeight(views::style::CONTEXT_LABEL, kStyle)); views::StyledLabel styled_label(base::ASCIIToUTF16("test"), nullptr); constexpr int kStyledLabelWidth = 200; // Enough to avoid wrapping. styled_label.SizeToFit(kStyledLabelWidth); EXPECT_EQ(kBodyLineHeight, styled_label.height()); // Adding a link should not change the size. styled_label.AddStyleRange( gfx::Range(0, 2), views::StyledLabel::RangeStyleInfo::CreateForLink()); styled_label.SizeToFit(kStyledLabelWidth); EXPECT_EQ(kBodyLineHeight, styled_label.height()); }
null
null
null
null
59,987
71,207
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
71,207
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef SERVICES_PREFERENCES_PUBLIC_CPP_DICTIONARY_VALUE_UPDATE_H_ #define SERVICES_PREFERENCES_PUBLIC_CPP_DICTIONARY_VALUE_UPDATE_H_ #include <memory> #include <string> #include <vector> #include "base/callback.h" #include "base/strings/string16.h" #include "base/strings/string_piece.h" #include "services/preferences/public/cpp/scoped_pref_update.h" namespace base { class DictionaryValue; class ListValue; class Value; } namespace prefs { // A wrapper around base::DictionaryValue that reports changes to its contents // via a callback. class DictionaryValueUpdate { public: using UpdateCallback = base::Callback<void(const std::vector<std::string>&)>; DictionaryValueUpdate(UpdateCallback report_update, base::DictionaryValue* value, std::vector<std::string> path); ~DictionaryValueUpdate(); bool HasKey(base::StringPiece key) const; // Returns the number of Values in this dictionary. size_t size() const; // Returns whether the dictionary is empty. bool empty() const; // Clears any current contents of this dictionary. void Clear(); // Sets the Value associated with the given path starting from this object. // A path has the form "<key>" or "<key>.<key>.[...]", where "." indexes // into the next DictionaryValue down. Obviously, "." can't be used // within a key, but there are no other restrictions on keys. // If the key at any step of the way doesn't exist, or exists but isn't // a DictionaryValue, a new DictionaryValue will be created and attached // to the path in that location. |in_value| must be non-null. void Set(base::StringPiece path, std::unique_ptr<base::Value> in_value); // This is similar to |Set|, but lets callers explicitly specify the path // components and thus allows nested keys with periods in them. void SetPath(std::initializer_list<base::StringPiece> path, base::Value value); // Convenience forms of Set(). These methods will replace any existing // value at that path, even if it has a different type. void SetBoolean(base::StringPiece path, bool in_value); void SetInteger(base::StringPiece path, int in_value); void SetDouble(base::StringPiece path, double in_value); void SetString(base::StringPiece path, base::StringPiece in_value); void SetString(base::StringPiece path, const base::string16& in_value); std::unique_ptr<DictionaryValueUpdate> SetDictionary( base::StringPiece path, std::unique_ptr<base::DictionaryValue> in_value); // Like Set(), but without special treatment of '.'. This allows e.g. URLs to // be used as paths. void SetKey(base::StringPiece key, base::Value value); void SetWithoutPathExpansion(base::StringPiece key, std::unique_ptr<base::Value> in_value); // Convenience forms of SetWithoutPathExpansion(). std::unique_ptr<DictionaryValueUpdate> SetDictionaryWithoutPathExpansion( base::StringPiece path, std::unique_ptr<base::DictionaryValue> in_value); // These are convenience forms of Get(). The value will be retrieved // and the return value will be true if the path is valid and the value at // the end of the path can be returned in the form specified. // |out_value| is optional and will only be set if non-NULL. bool GetBoolean(base::StringPiece path, bool* out_value) const; bool GetInteger(base::StringPiece path, int* out_value) const; // Values of both type Type::INTEGER and Type::DOUBLE can be obtained as // doubles. 
bool GetDouble(base::StringPiece path, double* out_value) const; bool GetString(base::StringPiece path, std::string* out_value) const; bool GetString(base::StringPiece path, base::string16* out_value) const; bool GetDictionary(base::StringPiece path, const base::DictionaryValue** out_value) const; bool GetDictionary(base::StringPiece path, std::unique_ptr<DictionaryValueUpdate>* out_value); bool GetList(base::StringPiece path, const base::ListValue** out_value) const; bool GetList(base::StringPiece path, base::ListValue** out_value); // Like Get(), but without special treatment of '.'. This allows e.g. URLs to // be used as paths. bool GetBooleanWithoutPathExpansion(base::StringPiece key, bool* out_value) const; bool GetIntegerWithoutPathExpansion(base::StringPiece key, int* out_value) const; bool GetDoubleWithoutPathExpansion(base::StringPiece key, double* out_value) const; bool GetStringWithoutPathExpansion(base::StringPiece key, std::string* out_value) const; bool GetStringWithoutPathExpansion(base::StringPiece key, base::string16* out_value) const; bool GetDictionaryWithoutPathExpansion( base::StringPiece key, const base::DictionaryValue** out_value) const; bool GetDictionaryWithoutPathExpansion( base::StringPiece key, std::unique_ptr<DictionaryValueUpdate>* out_value); bool GetListWithoutPathExpansion(base::StringPiece key, const base::ListValue** out_value) const; bool GetListWithoutPathExpansion(base::StringPiece key, base::ListValue** out_value); // Removes the Value with the specified path from this dictionary (or one // of its child dictionaries, if the path is more than just a local key). // If |out_value| is non-NULL, the removed Value will be passed out via // |out_value|. If |out_value| is NULL, the removed value will be deleted. // This method returns true if |path| is a valid path; otherwise it will // return false and the DictionaryValue object will be unchanged. bool Remove(base::StringPiece path, std::unique_ptr<base::Value>* out_value); // Like Remove(), but without special treatment of '.'. This allows e.g. URLs // to be used as paths. bool RemoveWithoutPathExpansion(base::StringPiece key, std::unique_ptr<base::Value>* out_value); // Removes a path, clearing out all dictionaries on |path| that remain empty // after removing the value at |path|. bool RemovePath(base::StringPiece path, std::unique_ptr<base::Value>* out_value); base::DictionaryValue* AsDictionary(); const base::DictionaryValue* AsConstDictionary() const; private: void RecordPath(base::StringPiece path); void RecordSplitPath(const std::vector<base::StringPiece>& path); void RecordKey(base::StringPiece key); std::vector<base::StringPiece> SplitPath(base::StringPiece path); std::vector<std::string> ConcatPath(const std::vector<std::string>& base_path, base::StringPiece path); std::vector<std::string> ConcatPath( const std::vector<std::string>& base_path, const std::vector<base::StringPiece>& path); UpdateCallback report_update_; base::DictionaryValue* const value_; const std::vector<std::string> path_; DISALLOW_COPY_AND_ASSIGN(DictionaryValueUpdate); }; } // namespace prefs #endif // SERVICES_PREFERENCES_PUBLIC_CPP_DICTIONARY_VALUE_UPDATE_H_
null
null
null
null
68,070
34,791
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
199,786
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * STK1160 driver * * Copyright (C) 2012 Ezequiel Garcia * <elezegarcia--a.t--gmail.com> * * Based on Easycap driver by R.M. Thomas * Copyright (C) 2010 R.M. Thomas * <rmthomas--a.t--sciolus.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/i2c.h> #include <sound/core.h> #include <sound/ac97_codec.h> #include <media/videobuf2-v4l2.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> #define STK1160_VERSION "0.9.5" #define STK1160_VERSION_NUM 0x000905 /* Decide on number of packets for each buffer */ #define STK1160_NUM_PACKETS 64 /* Number of buffers for isoc transfers */ #define STK1160_NUM_BUFS 16 #define STK1160_MIN_BUFS 1 /* TODO: This endpoint address should be retrieved */ #define STK1160_EP_VIDEO 0x82 #define STK1160_EP_AUDIO 0x81 /* Max and min video buffers */ #define STK1160_MIN_VIDEO_BUFFERS 8 #define STK1160_MAX_VIDEO_BUFFERS 32 #define STK1160_MIN_PKT_SIZE 3072 #define STK1160_MAX_INPUT 4 #define STK1160_SVIDEO_INPUT 4 #define STK1160_AC97_TIMEOUT 50 #define STK1160_I2C_TIMEOUT 100 /* TODO: Print helpers * I could use dev_xxx, pr_xxx, v4l2_xxx or printk. * However, there isn't a solid consensus on which * new drivers should use. * */ #ifdef DEBUG #define stk1160_dbg(fmt, args...) \ printk(KERN_DEBUG "stk1160: " fmt, ## args) #else #define stk1160_dbg(fmt, args...) #endif #define stk1160_info(fmt, args...) \ pr_info("stk1160: " fmt, ## args) #define stk1160_warn(fmt, args...) \ pr_warn("stk1160: " fmt, ## args) #define stk1160_err(fmt, args...) \ pr_err("stk1160: " fmt, ## args) /* Buffer for one video frame */ struct stk1160_buffer { /* common v4l buffer stuff -- must be first */ struct vb2_v4l2_buffer vb; struct list_head list; void *mem; unsigned int length; /* buffer length */ unsigned int bytesused; /* bytes written */ int odd; /* current oddity */ /* * Since we interlace two fields per frame, * this is different from bytesused. 
*/ unsigned int pos; /* current pos inside buffer */ }; struct stk1160_isoc_ctl { /* max packet size of isoc transaction */ int max_pkt_size; /* number of allocated urbs */ int num_bufs; /* urb for isoc transfers */ struct urb **urb; /* transfer buffers for isoc transfer */ char **transfer_buffer; /* current buffer */ struct stk1160_buffer *buf; }; struct stk1160_fmt { char *name; u32 fourcc; /* v4l2 format id */ int depth; }; struct stk1160 { struct v4l2_device v4l2_dev; struct video_device vdev; struct v4l2_ctrl_handler ctrl_handler; struct device *dev; struct usb_device *udev; /* saa7115 subdev */ struct v4l2_subdev *sd_saa7115; /* isoc control struct */ struct list_head avail_bufs; /* video capture */ struct vb2_queue vb_vidq; /* max packet size of isoc transaction */ int max_pkt_size; /* array of wMaxPacketSize */ unsigned int *alt_max_pkt_size; /* alternate */ int alt; /* Number of alternative settings */ int num_alt; struct stk1160_isoc_ctl isoc_ctl; /* frame properties */ int width; /* current frame width */ int height; /* current frame height */ unsigned int ctl_input; /* selected input */ v4l2_std_id norm; /* current norm */ struct stk1160_fmt *fmt; /* selected format */ unsigned int sequence; /* i2c i/o */ struct i2c_adapter i2c_adap; struct i2c_client i2c_client; struct mutex v4l_lock; struct mutex vb_queue_lock; spinlock_t buf_lock; struct file *fh_owner; /* filehandle ownership */ /* EXPERIMENTAL */ struct snd_card *snd_card; }; struct regval { u16 reg; u16 val; }; /* Provided by stk1160-v4l.c */ int stk1160_vb2_setup(struct stk1160 *dev); int stk1160_video_register(struct stk1160 *dev); void stk1160_video_unregister(struct stk1160 *dev); void stk1160_clear_queue(struct stk1160 *dev); /* Provided by stk1160-video.c */ int stk1160_alloc_isoc(struct stk1160 *dev); void stk1160_free_isoc(struct stk1160 *dev); void stk1160_cancel_isoc(struct stk1160 *dev); void stk1160_uninit_isoc(struct stk1160 *dev); /* Provided by stk1160-i2c.c */ int stk1160_i2c_register(struct stk1160 *dev); int stk1160_i2c_unregister(struct stk1160 *dev); /* Provided by stk1160-core.c */ int stk1160_read_reg(struct stk1160 *dev, u16 reg, u8 *value); int stk1160_write_reg(struct stk1160 *dev, u16 reg, u16 value); int stk1160_write_regs_req(struct stk1160 *dev, u8 req, u16 reg, char *buf, int len); int stk1160_read_reg_req_len(struct stk1160 *dev, u8 req, u16 reg, char *buf, int len); void stk1160_select_input(struct stk1160 *dev); /* Provided by stk1160-ac97.c */ void stk1160_ac97_setup(struct stk1160 *dev);
null
null
null
null
108,133
40,956
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
205,951
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef __KERN_LEVELS_H__ #define __KERN_LEVELS_H__ #define KERN_SOH "\001" /* ASCII Start Of Header */ #define KERN_SOH_ASCII '\001' #define KERN_EMERG KERN_SOH "0" /* system is unusable */ #define KERN_ALERT KERN_SOH "1" /* action must be taken immediately */ #define KERN_CRIT KERN_SOH "2" /* critical conditions */ #define KERN_ERR KERN_SOH "3" /* error conditions */ #define KERN_WARNING KERN_SOH "4" /* warning conditions */ #define KERN_NOTICE KERN_SOH "5" /* normal but significant condition */ #define KERN_INFO KERN_SOH "6" /* informational */ #define KERN_DEBUG KERN_SOH "7" /* debug-level messages */ #define KERN_DEFAULT KERN_SOH "d" /* the default kernel loglevel */ /* * Annotation for a "continued" line of log printout (only done after a * line that had no enclosing \n). Only to be used by core/arch code * during early bootup (a continued line is not SMP-safe otherwise). */ #define KERN_CONT KERN_SOH "c" /* integer equivalents of KERN_<LEVEL> */ #define LOGLEVEL_SCHED -2 /* Deferred messages from sched code * are set to this special level */ #define LOGLEVEL_DEFAULT -1 /* default (or last) loglevel */ #define LOGLEVEL_EMERG 0 /* system is unusable */ #define LOGLEVEL_ALERT 1 /* action must be taken immediately */ #define LOGLEVEL_CRIT 2 /* critical conditions */ #define LOGLEVEL_ERR 3 /* error conditions */ #define LOGLEVEL_WARNING 4 /* warning conditions */ #define LOGLEVEL_NOTICE 5 /* normal but significant condition */ #define LOGLEVEL_INFO 6 /* informational */ #define LOGLEVEL_DEBUG 7 /* debug-level messages */ #endif
null
null
null
null
114,298
36,075
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
36,075
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef THIRD_PARTY_BLINK_RENDERER_CORE_STYLE_NAMED_GRID_LINES_MAP_H_ #define THIRD_PARTY_BLINK_RENDERER_CORE_STYLE_NAMED_GRID_LINES_MAP_H_ #include "third_party/blink/renderer/platform/wtf/hash_map.h" #include "third_party/blink/renderer/platform/wtf/text/wtf_string.h" #include "third_party/blink/renderer/platform/wtf/vector.h" namespace blink { using NamedGridLinesMap = HashMap<String, Vector<size_t>>; } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_CORE_STYLE_NAMED_GRID_LINES_MAP_H_
null
null
null
null
32,938
35,729
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
200,724
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* drivers/char/watchdog/scx200_wdt.c National Semiconductor SCx200 Watchdog support Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> Some code taken from: National Semiconductor PC87307/PC97307 (ala SC1200) WDT driver (c) Copyright 2002 Zwane Mwaikambo <zwane@commfireservices.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. The author(s) of this software shall not be held liable for damages of any nature resulting due to the use of this software. This software is provided AS-IS with no warranties. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/fs.h> #include <linux/ioport.h> #include <linux/scx200.h> #include <linux/uaccess.h> #include <linux/io.h> #define DEBUG MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); MODULE_DESCRIPTION("NatSemi SCx200 Watchdog Driver"); MODULE_LICENSE("GPL"); static int margin = 60; /* in seconds */ module_param(margin, int, 0); MODULE_PARM_DESC(margin, "Watchdog margin in seconds"); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close"); static u16 wdto_restart; static char expect_close; static unsigned long open_lock; static DEFINE_SPINLOCK(scx_lock); /* Bits of the WDCNFG register */ #define W_ENABLE 0x00fa /* Enable watchdog */ #define W_DISABLE 0x0000 /* Disable watchdog */ /* The scaling factor for the timer, this depends on the value of W_ENABLE */ #define W_SCALE (32768/1024) static void scx200_wdt_ping(void) { spin_lock(&scx_lock); outw(wdto_restart, scx200_cb_base + SCx200_WDT_WDTO); spin_unlock(&scx_lock); } static void scx200_wdt_update_margin(void) { pr_info("timer margin %d seconds\n", margin); wdto_restart = margin * W_SCALE; } static void scx200_wdt_enable(void) { pr_debug("enabling watchdog timer, wdto_restart = %d\n", wdto_restart); spin_lock(&scx_lock); outw(0, scx200_cb_base + SCx200_WDT_WDTO); outb(SCx200_WDT_WDSTS_WDOVF, scx200_cb_base + SCx200_WDT_WDSTS); outw(W_ENABLE, scx200_cb_base + SCx200_WDT_WDCNFG); spin_unlock(&scx_lock); scx200_wdt_ping(); } static void scx200_wdt_disable(void) { pr_debug("disabling watchdog timer\n"); spin_lock(&scx_lock); outw(0, scx200_cb_base + SCx200_WDT_WDTO); outb(SCx200_WDT_WDSTS_WDOVF, scx200_cb_base + SCx200_WDT_WDSTS); outw(W_DISABLE, scx200_cb_base + SCx200_WDT_WDCNFG); spin_unlock(&scx_lock); } static int scx200_wdt_open(struct inode *inode, struct file *file) { /* only allow one at a time */ if (test_and_set_bit(0, &open_lock)) return -EBUSY; scx200_wdt_enable(); return nonseekable_open(inode, file); } static int scx200_wdt_release(struct inode *inode, struct file *file) { if (expect_close != 42) pr_warn("watchdog device closed unexpectedly, will not disable the watchdog timer\n"); else if (!nowayout) scx200_wdt_disable(); expect_close = 0; clear_bit(0, &open_lock); return 0; } static int scx200_wdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_HALT || code == SYS_POWER_OFF) if (!nowayout) scx200_wdt_disable(); return NOTIFY_DONE; } static struct notifier_block scx200_wdt_notifier = { .notifier_call = scx200_wdt_notify_sys, }; static ssize_t 
scx200_wdt_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { /* check for a magic close character */ if (len) { size_t i; scx200_wdt_ping(); expect_close = 0; for (i = 0; i < len; ++i) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') expect_close = 42; } return len; } return 0; } static long scx200_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; static const struct watchdog_info ident = { .identity = "NatSemi SCx200 Watchdog", .firmware_version = 1, .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, }; int new_margin; switch (cmd) { case WDIOC_GETSUPPORT: if (copy_to_user(argp, &ident, sizeof(ident))) return -EFAULT; return 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: if (put_user(0, p)) return -EFAULT; return 0; case WDIOC_KEEPALIVE: scx200_wdt_ping(); return 0; case WDIOC_SETTIMEOUT: if (get_user(new_margin, p)) return -EFAULT; if (new_margin < 1) return -EINVAL; margin = new_margin; scx200_wdt_update_margin(); scx200_wdt_ping(); case WDIOC_GETTIMEOUT: if (put_user(margin, p)) return -EFAULT; return 0; default: return -ENOTTY; } } static const struct file_operations scx200_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = scx200_wdt_write, .unlocked_ioctl = scx200_wdt_ioctl, .open = scx200_wdt_open, .release = scx200_wdt_release, }; static struct miscdevice scx200_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &scx200_wdt_fops, }; static int __init scx200_wdt_init(void) { int r; pr_debug("NatSemi SCx200 Watchdog Driver\n"); /* check that we have found the configuration block */ if (!scx200_cb_present()) return -ENODEV; if (!request_region(scx200_cb_base + SCx200_WDT_OFFSET, SCx200_WDT_SIZE, "NatSemi SCx200 Watchdog")) { pr_warn("watchdog I/O region busy\n"); return -EBUSY; } scx200_wdt_update_margin(); scx200_wdt_disable(); r = register_reboot_notifier(&scx200_wdt_notifier); if (r) { pr_err("unable to register reboot notifier\n"); release_region(scx200_cb_base + SCx200_WDT_OFFSET, SCx200_WDT_SIZE); return r; } r = misc_register(&scx200_wdt_miscdev); if (r) { unregister_reboot_notifier(&scx200_wdt_notifier); release_region(scx200_cb_base + SCx200_WDT_OFFSET, SCx200_WDT_SIZE); return r; } return 0; } static void __exit scx200_wdt_cleanup(void) { misc_deregister(&scx200_wdt_miscdev); unregister_reboot_notifier(&scx200_wdt_notifier); release_region(scx200_cb_base + SCx200_WDT_OFFSET, SCx200_WDT_SIZE); } module_init(scx200_wdt_init); module_exit(scx200_wdt_cleanup); /* Local variables: compile-command: "make -k -C ../.. SUBDIRS=drivers/char modules" c-basic-offset: 8 End: */
null
null
null
null
109,071
6,190
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
171,185
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* sound/soc/samsung/jive_wm8750.c * * Copyright 2007,2008 Simtec Electronics * * Based on sound/soc/pxa/spitz.c * Copyright 2005 Wolfson Microelectronics PLC. * Copyright 2005 Openedhand Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <sound/soc.h> #include <asm/mach-types.h> #include "s3c2412-i2s.h" #include "../codecs/wm8750.h" static const struct snd_soc_dapm_route audio_map[] = { { "Headphone Jack", NULL, "LOUT1" }, { "Headphone Jack", NULL, "ROUT1" }, { "Internal Speaker", NULL, "LOUT2" }, { "Internal Speaker", NULL, "ROUT2" }, { "LINPUT1", NULL, "Line Input" }, { "RINPUT1", NULL, "Line Input" }, }; static const struct snd_soc_dapm_widget wm8750_dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_SPK("Internal Speaker", NULL), SND_SOC_DAPM_LINE("Line In", NULL), }; static int jive_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; struct s3c_i2sv2_rate_calc div; unsigned int clk = 0; int ret = 0; switch (params_rate(params)) { case 8000: case 16000: case 48000: case 96000: clk = 12288000; break; case 11025: case 22050: case 44100: clk = 11289600; break; } s3c_i2sv2_iis_calc_rate(&div, NULL, params_rate(params), s3c_i2sv2_get_clock(cpu_dai)); /* set the codec system clock for DAC and ADC */ ret = snd_soc_dai_set_sysclk(codec_dai, WM8750_SYSCLK, clk, SND_SOC_CLOCK_IN); if (ret < 0) return ret; ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C2412_DIV_RCLK, div.fs_div); if (ret < 0) return ret; ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C2412_DIV_PRESCALER, div.clk_div - 1); if (ret < 0) return ret; return 0; } static struct snd_soc_ops jive_ops = { .hw_params = jive_hw_params, }; static struct snd_soc_dai_link jive_dai = { .name = "wm8750", .stream_name = "WM8750", .cpu_dai_name = "s3c2412-i2s", .codec_dai_name = "wm8750-hifi", .platform_name = "s3c2412-i2s", .codec_name = "wm8750.0-001a", .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS, .ops = &jive_ops, }; /* jive audio machine driver */ static struct snd_soc_card snd_soc_machine_jive = { .name = "Jive", .owner = THIS_MODULE, .dai_link = &jive_dai, .num_links = 1, .dapm_widgets = wm8750_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets), .dapm_routes = audio_map, .num_dapm_routes = ARRAY_SIZE(audio_map), .fully_routed = true, }; static struct platform_device *jive_snd_device; static int __init jive_init(void) { int ret; if (!machine_is_jive()) return 0; printk("JIVE WM8750 Audio support\n"); jive_snd_device = platform_device_alloc("soc-audio", -1); if (!jive_snd_device) return -ENOMEM; platform_set_drvdata(jive_snd_device, &snd_soc_machine_jive); ret = platform_device_add(jive_snd_device); if (ret) platform_device_put(jive_snd_device); return ret; } static void __exit jive_exit(void) { platform_device_unregister(jive_snd_device); } module_init(jive_init); module_exit(jive_exit); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_DESCRIPTION("ALSA SoC Jive Audio support"); MODULE_LICENSE("GPL");
null
null
null
null
79,532
24,762
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
189,757
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __NVKM_SECBOOT_LS_UCODE_H__ #define __NVKM_SECBOOT_LS_UCODE_H__ #include <core/os.h> #include <core/subdev.h> #include <subdev/secboot.h> struct nvkm_acr; /** * struct ls_ucode_img_desc - descriptor of firmware image * @descriptor_size: size of this descriptor * @image_size: size of the whole image * @bootloader_start_offset: start offset of the bootloader in ucode image * @bootloader_size: size of the bootloader * @bootloader_imem_offset: start off set of the bootloader in IMEM * @bootloader_entry_point: entry point of the bootloader in IMEM * @app_start_offset: start offset of the LS firmware * @app_size: size of the LS firmware's code and data * @app_imem_offset: offset of the app in IMEM * @app_imem_entry: entry point of the app in IMEM * @app_dmem_offset: offset of the data in DMEM * @app_resident_code_offset: offset of app code from app_start_offset * @app_resident_code_size: size of the code * @app_resident_data_offset: offset of data from app_start_offset * @app_resident_data_size: size of data * * A firmware image contains the code, data, and bootloader of a given LS * falcon in a single blob. This structure describes where everything is. * * This can be generated from a (bootloader, code, data) set if they have * been loaded separately, or come directly from a file. */ struct ls_ucode_img_desc { u32 descriptor_size; u32 image_size; u32 tools_version; u32 app_version; char date[64]; u32 bootloader_start_offset; u32 bootloader_size; u32 bootloader_imem_offset; u32 bootloader_entry_point; u32 app_start_offset; u32 app_size; u32 app_imem_offset; u32 app_imem_entry; u32 app_dmem_offset; u32 app_resident_code_offset; u32 app_resident_code_size; u32 app_resident_data_offset; u32 app_resident_data_size; u32 nb_overlays; struct {u32 start; u32 size; } load_ovl[64]; u32 compressed; }; /** * struct ls_ucode_img - temporary storage for loaded LS firmwares * @node: to link within lsf_ucode_mgr * @falcon_id: ID of the falcon this LS firmware is for * @ucode_desc: loaded or generated map of ucode_data * @ucode_data: firmware payload (code and data) * @ucode_size: size in bytes of data in ucode_data * @ucode_off: offset of the ucode in ucode_data * @sig: signature for this firmware * @sig:size: size of the signature in bytes * * Preparing the WPR LS blob requires information about all the LS firmwares * (size, etc) to be known. 
This structure contains all the data of one LS * firmware. */ struct ls_ucode_img { struct list_head node; enum nvkm_secboot_falcon falcon_id; struct ls_ucode_img_desc ucode_desc; u8 *ucode_data; u32 ucode_size; u32 ucode_off; u8 *sig; u32 sig_size; }; /** * struct fw_bin_header - header of firmware files * @bin_magic: always 0x3b1d14f0 * @bin_ver: version of the bin format * @bin_size: entire image size including this header * @header_offset: offset of the firmware/bootloader header in the file * @data_offset: offset of the firmware/bootloader payload in the file * @data_size: size of the payload * * This header is located at the beginning of the HS firmware and HS bootloader * files, to describe where the headers and data can be found. */ struct fw_bin_header { u32 bin_magic; u32 bin_ver; u32 bin_size; u32 header_offset; u32 data_offset; u32 data_size; }; /** * struct fw_bl_desc - firmware bootloader descriptor * @start_tag: starting tag of bootloader * @desc_dmem_load_off: DMEM offset of flcn_bl_dmem_desc * @code_off: offset of code section * @code_size: size of code section * @data_off: offset of data section * @data_size: size of data section * * This structure is embedded in bootloader firmware files at to describe the * IMEM and DMEM layout expected by the bootloader. */ struct fw_bl_desc { u32 start_tag; u32 dmem_load_off; u32 code_off; u32 code_size; u32 data_off; u32 data_size; }; int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, struct ls_ucode_img *); int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, struct ls_ucode_img *); int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, struct ls_ucode_img *); int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *); int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, struct ls_ucode_img *); int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *); #endif
null
null
null
null
98,104
40,777
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
205,772
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef __LINUX_HYPERVISOR_H #define __LINUX_HYPERVISOR_H /* * Generic Hypervisor support * Juergen Gross <jgross@suse.com> */ #ifdef CONFIG_HYPERVISOR_GUEST #include <asm/hypervisor.h> #else static inline void hypervisor_pin_vcpu(int cpu) { } #endif #endif /* __LINUX_HYPERVISOR_H */
null
null
null
null
114,119
40,271
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
205,266
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * linux/include/linux/ext2_fs.h * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/include/linux/minix_fs.h * * Copyright (C) 1991, 1992 Linus Torvalds */ #ifndef _LINUX_EXT2_FS_H #define _LINUX_EXT2_FS_H #include <linux/types.h> #include <linux/magic.h> #define EXT2_NAME_LEN 255 /* * Maximal count of links to a file */ #define EXT2_LINK_MAX 32000 #define EXT2_SB_MAGIC_OFFSET 0x38 #define EXT2_SB_BLOCKS_OFFSET 0x04 #define EXT2_SB_BSIZE_OFFSET 0x18 static inline u64 ext2_image_size(void *ext2_sb) { __u8 *p = ext2_sb; if (*(__le16 *)(p + EXT2_SB_MAGIC_OFFSET) != cpu_to_le16(EXT2_SUPER_MAGIC)) return 0; return (u64)le32_to_cpup((__le32 *)(p + EXT2_SB_BLOCKS_OFFSET)) << le32_to_cpup((__le32 *)(p + EXT2_SB_BSIZE_OFFSET)); } #endif /* _LINUX_EXT2_FS_H */
null
null
null
null
113,613
4,455
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
169,450
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (C) International Business Machines Corp., 2000-2004 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/quotaops.h> #include "jfs_incore.h" #include "jfs_inode.h" #include "jfs_filsys.h" #include "jfs_imap.h" #include "jfs_dinode.h" #include "jfs_debug.h" void jfs_set_inode_flags(struct inode *inode) { unsigned int flags = JFS_IP(inode)->mode2; unsigned int new_fl = 0; if (flags & JFS_IMMUTABLE_FL) new_fl |= S_IMMUTABLE; if (flags & JFS_APPEND_FL) new_fl |= S_APPEND; if (flags & JFS_NOATIME_FL) new_fl |= S_NOATIME; if (flags & JFS_DIRSYNC_FL) new_fl |= S_DIRSYNC; if (flags & JFS_SYNC_FL) new_fl |= S_SYNC; inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND | S_NOATIME | S_DIRSYNC | S_SYNC); } void jfs_get_inode_flags(struct jfs_inode_info *jfs_ip) { unsigned int flags = jfs_ip->vfs_inode.i_flags; jfs_ip->mode2 &= ~(JFS_IMMUTABLE_FL | JFS_APPEND_FL | JFS_NOATIME_FL | JFS_DIRSYNC_FL | JFS_SYNC_FL); if (flags & S_IMMUTABLE) jfs_ip->mode2 |= JFS_IMMUTABLE_FL; if (flags & S_APPEND) jfs_ip->mode2 |= JFS_APPEND_FL; if (flags & S_NOATIME) jfs_ip->mode2 |= JFS_NOATIME_FL; if (flags & S_DIRSYNC) jfs_ip->mode2 |= JFS_DIRSYNC_FL; if (flags & S_SYNC) jfs_ip->mode2 |= JFS_SYNC_FL; } /* * NAME: ialloc() * * FUNCTION: Allocate a new inode * */ struct inode *ialloc(struct inode *parent, umode_t mode) { struct super_block *sb = parent->i_sb; struct inode *inode; struct jfs_inode_info *jfs_inode; int rc; inode = new_inode(sb); if (!inode) { jfs_warn("ialloc: new_inode returned NULL!"); rc = -ENOMEM; goto fail; } jfs_inode = JFS_IP(inode); rc = diAlloc(parent, S_ISDIR(mode), inode); if (rc) { jfs_warn("ialloc: diAlloc returned %d!", rc); if (rc == -EIO) make_bad_inode(inode); goto fail_put; } if (insert_inode_locked(inode) < 0) { rc = -EINVAL; goto fail_put; } inode_init_owner(inode, parent, mode); /* * New inodes need to save sane values on disk when * uid & gid mount options are used */ jfs_inode->saved_uid = inode->i_uid; jfs_inode->saved_gid = inode->i_gid; /* * Allocate inode to quota. 
*/ rc = dquot_initialize(inode); if (rc) goto fail_drop; rc = dquot_alloc_inode(inode); if (rc) goto fail_drop; /* inherit flags from parent */ jfs_inode->mode2 = JFS_IP(parent)->mode2 & JFS_FL_INHERIT; if (S_ISDIR(mode)) { jfs_inode->mode2 |= IDIRECTORY; jfs_inode->mode2 &= ~JFS_DIRSYNC_FL; } else { jfs_inode->mode2 |= INLINEEA | ISPARSE; if (S_ISLNK(mode)) jfs_inode->mode2 &= ~(JFS_IMMUTABLE_FL|JFS_APPEND_FL); } jfs_inode->mode2 |= inode->i_mode; inode->i_blocks = 0; inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); jfs_inode->otime = inode->i_ctime.tv_sec; inode->i_generation = JFS_SBI(sb)->gengen++; jfs_inode->cflag = 0; /* Zero remaining fields */ memset(&jfs_inode->acl, 0, sizeof(dxd_t)); memset(&jfs_inode->ea, 0, sizeof(dxd_t)); jfs_inode->next_index = 0; jfs_inode->acltype = 0; jfs_inode->btorder = 0; jfs_inode->btindex = 0; jfs_inode->bxflag = 0; jfs_inode->blid = 0; jfs_inode->atlhead = 0; jfs_inode->atltail = 0; jfs_inode->xtlid = 0; jfs_set_inode_flags(inode); jfs_info("ialloc returns inode = 0x%p", inode); return inode; fail_drop: dquot_drop(inode); inode->i_flags |= S_NOQUOTA; clear_nlink(inode); unlock_new_inode(inode); fail_put: iput(inode); fail: return ERR_PTR(rc); }
null
null
null
null
77,797
14,878
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
14,878
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_USER_MANAGER_USER_MANAGER_BASE_H_ #define COMPONENTS_USER_MANAGER_USER_MANAGER_BASE_H_ #include <map> #include <memory> #include <set> #include <string> #include <vector> #include "base/macros.h" #include "base/memory/weak_ptr.h" #include "base/observer_list.h" #include "base/synchronization/lock.h" #include "base/time/time.h" #include "components/signin/core/account_id/account_id.h" #include "components/user_manager/user.h" #include "components/user_manager/user_manager.h" #include "components/user_manager/user_manager_export.h" #include "components/user_manager/user_type.h" class PrefRegistrySimple; namespace base { class ListValue; class TaskRunner; } namespace user_manager { class RemoveUserDelegate; // Base implementation of the UserManager interface. class USER_MANAGER_EXPORT UserManagerBase : public UserManager { public: // Creates UserManagerBase with |task_runner| for UI thread and // |blocking_task_runner| for SequencedWorkerPool. explicit UserManagerBase(scoped_refptr<base::TaskRunner> task_runner); ~UserManagerBase() override; // Registers UserManagerBase preferences. static void RegisterPrefs(PrefRegistrySimple* registry); // UserManager implementation: void Shutdown() override; const UserList& GetUsers() const override; const UserList& GetLoggedInUsers() const override; const UserList& GetLRULoggedInUsers() const override; const AccountId& GetOwnerAccountId() const override; void UserLoggedIn(const AccountId& account_id, const std::string& user_id_hash, bool browser_restart, bool is_child) override; void SwitchActiveUser(const AccountId& account_id) override; void SwitchToLastActiveUser() override; void OnSessionStarted() override; void OnProfileInitialized(User* user) override; void RemoveUser(const AccountId& account_id, RemoveUserDelegate* delegate) override; void RemoveUserFromList(const AccountId& account_id) override; bool IsKnownUser(const AccountId& account_id) const override; const User* FindUser(const AccountId& account_id) const override; User* FindUserAndModify(const AccountId& account_id) override; const User* GetActiveUser() const override; User* GetActiveUser() override; const User* GetPrimaryUser() const override; void SaveUserOAuthStatus(const AccountId& account_id, User::OAuthTokenStatus oauth_token_status) override; void SaveForceOnlineSignin(const AccountId& account_id, bool force_online_signin) override; void SaveUserDisplayName(const AccountId& account_id, const base::string16& display_name) override; base::string16 GetUserDisplayName(const AccountId& account_id) const override; void SaveUserDisplayEmail(const AccountId& account_id, const std::string& display_email) override; std::string GetUserDisplayEmail(const AccountId& account_id) const override; void SaveUserType(const User* user) override; void UpdateUserAccountData(const AccountId& account_id, const UserAccountData& account_data) override; bool IsCurrentUserOwner() const override; bool IsCurrentUserNew() const override; bool IsCurrentUserNonCryptohomeDataEphemeral() const override; bool IsCurrentUserCryptohomeDataEphemeral() const override; bool CanCurrentUserLock() const override; bool IsUserLoggedIn() const override; bool IsLoggedInAsUserWithGaiaAccount() const override; bool IsLoggedInAsChildUser() const override; bool IsLoggedInAsPublicAccount() const override; bool IsLoggedInAsGuest() const override; bool 
IsLoggedInAsSupervisedUser() const override; bool IsLoggedInAsKioskApp() const override; bool IsLoggedInAsArcKioskApp() const override; bool IsLoggedInAsStub() const override; bool IsUserNonCryptohomeDataEphemeral( const AccountId& account_id) const override; bool IsUserCryptohomeDataEphemeral( const AccountId& account_id) const override; void AddObserver(UserManager::Observer* obs) override; void RemoveObserver(UserManager::Observer* obs) override; void AddSessionStateObserver( UserManager::UserSessionStateObserver* obs) override; void RemoveSessionStateObserver( UserManager::UserSessionStateObserver* obs) override; void NotifyLocalStateChanged() override; void NotifyUserImageChanged(const User& user) override; void NotifyUserProfileImageUpdateFailed(const User& user) override; void NotifyUserProfileImageUpdated( const User& user, const gfx::ImageSkia& profile_image) override; void NotifyUsersSignInConstraintsChanged() override; void ResetProfileEverInitialized(const AccountId& account_id) override; void Initialize() override; // This method updates "User was added to the device in this session nad is // not full initialized yet" flag. virtual void SetIsCurrentUserNew(bool is_new); // Helper function that converts users from |users_list| to |users_vector| and // |users_set|. Duplicates and users already present in |existing_users| are // skipped. void ParseUserList(const base::ListValue& users_list, const std::set<AccountId>& existing_users, std::vector<AccountId>* users_vector, std::set<AccountId>* users_set); // Returns true if trusted device policies have successfully been retrieved // and ephemeral users are enabled. virtual bool AreEphemeralUsersEnabled() const = 0; void AddUserRecordForTesting(User* user) { return AddUserRecord(user); } // Returns true if device is enterprise managed. virtual bool IsEnterpriseManaged() const = 0; protected: // Adds |user| to users list, and adds it to front of LRU list. It is assumed // that there is no user with same id. virtual void AddUserRecord(User* user); // Returns true if user may be removed. virtual bool CanUserBeRemoved(const User* user) const; // A wrapper around C++ delete operator. Deletes |user|, and when |user| // equals to active_user_, active_user_ is reset to NULL. virtual void DeleteUser(User* user); // Returns the locale used by the application. virtual const std::string& GetApplicationLocale() const = 0; // Loads |users_| from Local State if the list has not been loaded yet. // Subsequent calls have no effect. Must be called on the UI thread. virtual void EnsureUsersLoaded(); // Handle OAuth token |status| change for |account_id|. virtual void HandleUserOAuthTokenStatusChange( const AccountId& account_id, User::OAuthTokenStatus status) const = 0; // Loads device local accounts from the Local state and fills in // |device_local_accounts_set|. virtual void LoadDeviceLocalAccounts( std::set<AccountId>* device_local_accounts_set) = 0; // Notifies that user has logged in. virtual void NotifyOnLogin(); // Notifies observers that another user was added to the session. // If |user_switch_pending| is true this means that user has not been fully // initialized yet like waiting for profile to be loaded. virtual void NotifyUserAddedToSession(const User* added_user, bool user_switch_pending); // Performs any additional actions before user list is loaded. virtual void PerformPreUserListLoadingActions() = 0; // Performs any additional actions after user list is loaded. 
virtual void PerformPostUserListLoadingActions() = 0; // Performs any additional actions after UserLoggedIn() execution has been // completed. // |browser_restart| is true when reloading Chrome after crash to distinguish // from normal sign in flow. virtual void PerformPostUserLoggedInActions(bool browser_restart) = 0; // Implementation for RemoveUser method. It is synchronous. It is called from // RemoveUserInternal after owner check. virtual void RemoveNonOwnerUserInternal(const AccountId& account_id, RemoveUserDelegate* delegate); // Removes a regular or supervised user from the user list. // Returns the user if found or NULL otherwise. // Also removes the user from the persistent user list. // |notify| is true when OnUserRemoved() should be triggered, // meaning that the user won't be added after the removal. User* RemoveRegularOrSupervisedUserFromList(const AccountId& account_id, bool notify); // Implementation for RemoveUser method. This is an asynchronous part of the // method, that verifies that owner will not get deleted, and calls // |RemoveNonOwnerUserInternal|. virtual void RemoveUserInternal(const AccountId& account_id, RemoveUserDelegate* delegate); // Removes data stored or cached outside the user's cryptohome (wallpaper, // avatar, OAuth token status, display name, display email). virtual void RemoveNonCryptohomeData(const AccountId& account_id); // Check for a particular user type. // Returns true if |account_id| represents demo app. virtual bool IsDemoApp(const AccountId& account_id) const = 0; // These methods are called when corresponding user type has signed in. // Indicates that the demo account has just logged in. virtual void DemoAccountLoggedIn() = 0; // Indicates that a user just logged in as guest. virtual void GuestUserLoggedIn(); // Indicates that a kiosk app robot just logged in. virtual void KioskAppLoggedIn(User* user) = 0; // Indicates that an ARC kiosk app robot just logged in. virtual void ArcKioskAppLoggedIn(User* user) = 0; // Indicates that a user just logged into a public session. virtual void PublicAccountUserLoggedIn(User* user) = 0; // Indicates that a regular user just logged in. virtual void RegularUserLoggedIn(const AccountId& account_id, const UserType user_type); // Indicates that a regular user just logged in as ephemeral. virtual void RegularUserLoggedInAsEphemeral(const AccountId& account_id, const UserType user_type); // Indicates that a supervised user just logged in. virtual void SupervisedUserLoggedIn(const AccountId& account_id) = 0; // Should be called when regular user was removed. virtual void OnUserRemoved(const AccountId& account_id) = 0; // Update the global LoginState. virtual void UpdateLoginState(const User* active_user, const User* primary_user, bool is_current_user_owner) const = 0; // Getters/setters for private members. virtual bool GetEphemeralUsersEnabled() const; virtual void SetEphemeralUsersEnabled(bool enabled); virtual void SetOwnerId(const AccountId& owner_account_id); virtual const AccountId& GetPendingUserSwitchID() const; virtual void SetPendingUserSwitchId(const AccountId& account_id); // The logged-in user that is currently active in current session. // NULL until a user has logged in, then points to one // of the User instances in |users_|, the |guest_user_| instance or an // ephemeral user instance. User* active_user_ = nullptr; // The primary user of the current session. It is recorded for the first // signed-in user and does not change thereafter. User* primary_user_ = nullptr; // List of all known users. 
User instances are owned by |this|. Regular users // are removed by |RemoveUserFromList|, device local accounts by // |UpdateAndCleanUpDeviceLocalAccounts|. UserList users_; // List of all users that are logged in current session. These point to User // instances in |users_|. Only one of them could be marked as active. UserList logged_in_users_; // A list of all users that are logged in the current session. In contrast to // |logged_in_users|, the order of this list is least recently used so that // the active user should always be the first one in the list. UserList lru_logged_in_users_; private: // Stages of loading user list from preferences. Some methods can have // different behavior depending on stage. enum UserLoadStage { STAGE_NOT_LOADED = 0, STAGE_LOADING, STAGE_LOADED }; // Returns a list of users who have logged into this device previously. // Same as GetUsers but used if you need to modify User from that list. UserList& GetUsersAndModify(); // Returns the user with the given email address if found in the persistent // list. Returns |NULL| otherwise. const User* FindUserInList(const AccountId& account_id) const; // Returns |true| if user with the given id is found in the persistent list. // Returns |false| otherwise. Does not trigger user loading. bool UserExistsInList(const AccountId& account_id) const; // Same as FindUserInList but returns non-const pointer to User object. User* FindUserInListAndModify(const AccountId& account_id); // Reads user's oauth token status from local state preferences. User::OAuthTokenStatus LoadUserOAuthStatus(const AccountId& account_id) const; // Read a flag indicating whether online authentication against GAIA should // be enforced during the user's next sign-in from local state preferences. bool LoadForceOnlineSignin(const AccountId& account_id) const; // Read a flag indicating whether session initialization has completed at // least once. bool LoadSessionInitialized(const AccountId& account_id) const; // Notifies observers that merge session state had changed. void NotifyMergeSessionStateChanged(); // Notifies observers that active user has changed. void NotifyActiveUserChanged(const User* active_user); // Notifies observers that active account_id hash has changed. void NotifyActiveUserHashChanged(const std::string& hash); // Call UpdateLoginState. void CallUpdateLoginState(); // Insert |user| at the front of the LRU user list. void SetLRUUser(User* user); // Sends metrics in response to a user with gaia account (regular) logging in. void SendGaiaUserLoginMetrics(const AccountId& account_id); // Sets account locale for user with id |account_id|. virtual void UpdateUserAccountLocale(const AccountId& account_id, const std::string& locale); // Updates user account after locale was resolved. void DoUpdateAccountLocale(const AccountId& account_id, std::unique_ptr<std::string> resolved_locale); // Indicates stage of loading user from prefs. UserLoadStage user_loading_stage_ = STAGE_NOT_LOADED; // Cached flag of whether the currently logged-in user existed before this // login. bool is_current_user_new_ = false; // Cached flag of whether the currently logged-in user is a regular user who // logged in as ephemeral. Storage of persistent information is avoided for // such users by not adding them to the persistent user list, not downloading // their custom avatars and mounting their cryptohomes using tmpfs. Defaults // to |false|. 
bool is_current_user_ephemeral_regular_user_ = false; // Cached flag indicating whether the ephemeral user policy is enabled. // Defaults to |false| if the value has not been read from trusted device // policy yet. bool ephemeral_users_enabled_ = false; // Cached name of device owner. Defaults to empty if the value has not // been read from trusted device policy yet. AccountId owner_account_id_ = EmptyAccountId(); base::ObserverList<UserManager::Observer> observer_list_; // TODO(nkostylev): Merge with session state refactoring CL. base::ObserverList<UserManager::UserSessionStateObserver> session_state_observer_list_; // Time at which this object was created. base::TimeTicks manager_creation_time_ = base::TimeTicks::Now(); // ID of the user just added to the session that needs to be activated // as soon as user's profile is loaded. AccountId pending_user_switch_ = EmptyAccountId(); // ID of the user that was active in the previous session. // Preference value is stored here before first user signs in // because pref will be overidden once session restore starts. AccountId last_session_active_account_id_ = EmptyAccountId(); bool last_session_active_account_id_initialized_ = false; // TaskRunner for UI thread. scoped_refptr<base::TaskRunner> task_runner_; base::WeakPtrFactory<UserManagerBase> weak_factory_; DISALLOW_COPY_AND_ASSIGN(UserManagerBase); }; } // namespace user_manager #endif // COMPONENTS_USER_MANAGER_USER_MANAGER_BASE_H_
null
null
null
null
11,741
7,324
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
172,319
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (C) 2007 Google, Inc. * Copyright (C) 2011 Intel, Inc. * Copyright (C) 2013 Intel, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/irq.h> #include <linux/platform_device.h> /* * Where in virtual device memory the IO devices (timers, system controllers * and so on) */ #define GOLDFISH_PDEV_BUS_BASE (0xff001000) #define GOLDFISH_PDEV_BUS_END (0xff7fffff) #define GOLDFISH_PDEV_BUS_IRQ (4) #define GOLDFISH_TTY_BASE (0x2000) static struct resource goldfish_pdev_bus_resources[] = { { .start = GOLDFISH_PDEV_BUS_BASE, .end = GOLDFISH_PDEV_BUS_END, .flags = IORESOURCE_MEM, }, { .start = GOLDFISH_PDEV_BUS_IRQ, .end = GOLDFISH_PDEV_BUS_IRQ, .flags = IORESOURCE_IRQ, } }; static bool goldfish_enable __initdata; static int __init goldfish_setup(char *str) { goldfish_enable = true; return 0; } __setup("goldfish", goldfish_setup); static int __init goldfish_init(void) { if (!goldfish_enable) return -ENODEV; platform_device_register_simple("goldfish_pdev_bus", -1, goldfish_pdev_bus_resources, 2); return 0; } device_initcall(goldfish_init);
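Editor's illustrative sketch (not part of the record above): the file just dumped registers a platform device named "goldfish_pdev_bus" with one MEM and one IRQ resource, so its consumer is a platform_driver whose .driver.name carries the same string. The probe body below is an assumption added for illustration only; it is not the real goldfish bus driver.

/* Sketch: a platform_driver that would bind to the "goldfish_pdev_bus"
 * device registered above. Probe contents are placeholder assumptions.
 */
#include <linux/module.h>
#include <linux/platform_device.h>

static int goldfish_pdev_bus_probe(struct platform_device *pdev)
{
	/* Fetch the MEM and IRQ resources declared by the registration code. */
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	if (!mem || irq < 0)
		return -ENODEV;

	dev_info(&pdev->dev, "bound: mem %pR irq %d\n", mem, irq);
	return 0;
}

static struct platform_driver goldfish_pdev_bus_driver = {
	.probe = goldfish_pdev_bus_probe,
	.driver = {
		.name = "goldfish_pdev_bus",
	},
};
module_platform_driver(goldfish_pdev_bus_driver);

MODULE_LICENSE("GPL");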
null
null
null
null
80,666
30,004
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
194,999
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * MUSB OTG driver peripheral defines * * Copyright 2005 Mentor Graphics Corporation * Copyright (C) 2005-2006 by Texas Instruments * Copyright (C) 2006-2007 Nokia Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #ifndef __MUSB_GADGET_H #define __MUSB_GADGET_H #include <linux/list.h> #if IS_ENABLED(CONFIG_USB_MUSB_GADGET) || IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE) extern irqreturn_t musb_g_ep0_irq(struct musb *); extern void musb_g_tx(struct musb *, u8); extern void musb_g_rx(struct musb *, u8); extern void musb_g_reset(struct musb *); extern void musb_g_suspend(struct musb *); extern void musb_g_resume(struct musb *); extern void musb_g_wakeup(struct musb *); extern void musb_g_disconnect(struct musb *); extern void musb_gadget_cleanup(struct musb *); extern int musb_gadget_setup(struct musb *); #else static inline irqreturn_t musb_g_ep0_irq(struct musb *musb) { return 0; } static inline void musb_g_tx(struct musb *musb, u8 epnum) {} static inline void musb_g_rx(struct musb *musb, u8 epnum) {} static inline void musb_g_reset(struct musb *musb) {} static inline void musb_g_suspend(struct musb *musb) {} static inline void musb_g_resume(struct musb *musb) {} static inline void musb_g_wakeup(struct musb *musb) {} static inline void musb_g_disconnect(struct musb *musb) {} static inline void musb_gadget_cleanup(struct musb *musb) {} static inline int musb_gadget_setup(struct musb *musb) { return 0; } #endif enum buffer_map_state { UN_MAPPED = 0, PRE_MAPPED, MUSB_MAPPED }; struct musb_request { struct usb_request request; struct list_head list; struct musb_ep *ep; struct musb *musb; u8 tx; /* endpoint direction */ u8 epnum; enum buffer_map_state map_state; }; static inline struct musb_request *to_musb_request(struct usb_request *req) { return req ? container_of(req, struct musb_request, request) : NULL; } extern struct usb_request * musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags); extern void musb_free_request(struct usb_ep *ep, struct usb_request *req); /* * struct musb_ep - peripheral side view of endpoint rx or tx side */ struct musb_ep { /* stuff towards the head is basically write-once. 
*/ struct usb_ep end_point; char name[12]; struct musb_hw_ep *hw_ep; struct musb *musb; u8 current_epnum; /* ... when enabled/disabled ... */ u8 type; u8 is_in; u16 packet_sz; const struct usb_endpoint_descriptor *desc; struct dma_channel *dma; /* later things are modified based on usage */ struct list_head req_list; u8 wedged; /* true if lock must be dropped but req_list may not be advanced */ u8 busy; u8 hb_mult; }; static inline struct musb_ep *to_musb_ep(struct usb_ep *ep) { return ep ? container_of(ep, struct musb_ep, end_point) : NULL; } static inline struct musb_request *next_request(struct musb_ep *ep) { struct list_head *queue = &ep->req_list; if (list_empty(queue)) return NULL; return container_of(queue->next, struct musb_request, list); } extern const struct usb_ep_ops musb_g_ep0_ops; extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); extern void musb_ep_restart(struct musb *, struct musb_request *); #endif /* __MUSB_GADGET_H */
null
null
null
null
103,346
31,121
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
31,121
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/* * Copyright (C) 2016 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "third_party/blink/renderer/core/frame/dom_visual_viewport.h" #include "third_party/blink/renderer/core/dom/document.h" #include "third_party/blink/renderer/core/dom/element.h" #include "third_party/blink/renderer/core/frame/local_dom_window.h" #include "third_party/blink/renderer/core/frame/local_frame.h" #include "third_party/blink/renderer/core/frame/local_frame_view.h" #include "third_party/blink/renderer/core/frame/visual_viewport.h" #include "third_party/blink/renderer/core/layout/adjust_for_absolute_zoom.h" #include "third_party/blink/renderer/core/page/page.h" #include "third_party/blink/renderer/core/style/computed_style.h" namespace blink { DOMVisualViewport::DOMVisualViewport(LocalDOMWindow* window) : window_(window) {} DOMVisualViewport::~DOMVisualViewport() = default; void DOMVisualViewport::Trace(blink::Visitor* visitor) { visitor->Trace(window_); EventTargetWithInlineData::Trace(visitor); } const AtomicString& DOMVisualViewport::InterfaceName() const { return EventTargetNames::DOMVisualViewport; } ExecutionContext* DOMVisualViewport::GetExecutionContext() const { return window_->GetExecutionContext(); } float DOMVisualViewport::offsetLeft() const { LocalFrame* frame = window_->GetFrame(); if (!frame || !frame->IsMainFrame()) return 0; if (Page* page = frame->GetPage()) return page->GetVisualViewport().OffsetLeft(); return 0; } float DOMVisualViewport::offsetTop() const { LocalFrame* frame = window_->GetFrame(); if (!frame || !frame->IsMainFrame()) return 0; if (Page* page = frame->GetPage()) return page->GetVisualViewport().OffsetTop(); return 0; } float DOMVisualViewport::pageLeft() const { LocalFrame* frame = window_->GetFrame(); if (!frame) return 0; Page* page = frame->GetPage(); if (!page) return 0; LocalFrameView* view = frame->View(); if (!view || !view->LayoutViewportScrollableArea()) return 0; frame->GetDocument()->UpdateStyleAndLayoutIgnorePendingStylesheets(); float viewport_x = page->GetVisualViewport().GetScrollOffset().Width() + view->LayoutViewportScrollableArea()->GetScrollOffset().Width(); return AdjustForAbsoluteZoom::AdjustScroll(viewport_x, frame->PageZoomFactor()); } float DOMVisualViewport::pageTop() const { LocalFrame* frame = window_->GetFrame(); if 
(!frame) return 0; Page* page = frame->GetPage(); if (!page) return 0; LocalFrameView* view = frame->View(); if (!view || !view->LayoutViewportScrollableArea()) return 0; frame->GetDocument()->UpdateStyleAndLayoutIgnorePendingStylesheets(); float viewport_y = page->GetVisualViewport().GetScrollOffset().Height() + view->LayoutViewportScrollableArea()->GetScrollOffset().Height(); return AdjustForAbsoluteZoom::AdjustScroll(viewport_y, frame->PageZoomFactor()); } double DOMVisualViewport::width() const { LocalFrame* frame = window_->GetFrame(); if (!frame) return 0; if (!frame->IsMainFrame()) { // Update layout to ensure scrollbars are up-to-date. frame->GetDocument()->UpdateStyleAndLayoutIgnorePendingStylesheets(); auto* scrollable_area = frame->View()->LayoutViewportScrollableArea(); float width = scrollable_area->VisibleContentRect(kExcludeScrollbars).Width(); return AdjustForAbsoluteZoom::AdjustInt(clampTo<int>(ceilf(width)), frame->PageZoomFactor()); } if (Page* page = frame->GetPage()) return page->GetVisualViewport().Width(); return 0; } double DOMVisualViewport::height() const { LocalFrame* frame = window_->GetFrame(); if (!frame) return 0; if (!frame->IsMainFrame()) { // Update layout to ensure scrollbars are up-to-date. frame->GetDocument()->UpdateStyleAndLayoutIgnorePendingStylesheets(); auto* scrollable_area = frame->View()->LayoutViewportScrollableArea(); float height = scrollable_area->VisibleContentRect(kExcludeScrollbars).Height(); return AdjustForAbsoluteZoom::AdjustInt(clampTo<int>(ceilf(height)), frame->PageZoomFactor()); } if (Page* page = frame->GetPage()) return page->GetVisualViewport().Height(); return 0; } double DOMVisualViewport::scale() const { LocalFrame* frame = window_->GetFrame(); if (!frame) return 0; if (!frame->IsMainFrame()) return 1; if (Page* page = window_->GetFrame()->GetPage()) return page->GetVisualViewport().ScaleForVisualViewport(); return 0; } } // namespace blink
null
null
null
null
27,984
54,889
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
54,889
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/renderer/extensions/file_browser_handler_custom_bindings.h" #include <string> #include "base/logging.h" #include "build/build_config.h" #include "extensions/renderer/script_context.h" #include "third_party/blink/public/platform/web_string.h" #include "third_party/blink/public/web/web_dom_file_system.h" #include "third_party/blink/public/web/web_local_frame.h" namespace extensions { FileBrowserHandlerCustomBindings::FileBrowserHandlerCustomBindings( ScriptContext* context) : ObjectBackedNativeHandler(context) {} void FileBrowserHandlerCustomBindings::AddRoutes() { RouteHandlerFunction( "GetExternalFileEntry", "fileBrowserHandler", base::Bind( &FileBrowserHandlerCustomBindings::GetExternalFileEntryCallback, base::Unretained(this))); } void FileBrowserHandlerCustomBindings::GetExternalFileEntry( const v8::FunctionCallbackInfo<v8::Value>& args, ScriptContext* context) { // TODO(zelidrag): Make this magic work on other platforms when file browser // matures enough on ChromeOS. #if defined(OS_CHROMEOS) CHECK(args.Length() == 1); CHECK(args[0]->IsObject()); v8::Local<v8::Object> file_def = args[0]->ToObject(); v8::Isolate* isolate = args.GetIsolate(); std::string file_system_name(*v8::String::Utf8Value( isolate, file_def->Get(v8::String::NewFromUtf8(isolate, "fileSystemName")))); GURL file_system_root(*v8::String::Utf8Value( isolate, file_def->Get(v8::String::NewFromUtf8(isolate, "fileSystemRoot")))); std::string file_full_path(*v8::String::Utf8Value( isolate, file_def->Get(v8::String::NewFromUtf8(isolate, "fileFullPath")))); bool is_directory = file_def->Get(v8::String::NewFromUtf8(isolate, "fileIsDirectory")) ->ToBoolean() ->Value(); blink::WebDOMFileSystem::EntryType entry_type = is_directory ? blink::WebDOMFileSystem::kEntryTypeDirectory : blink::WebDOMFileSystem::kEntryTypeFile; blink::WebLocalFrame* webframe = blink::WebLocalFrame::FrameForContext(context->v8_context()); args.GetReturnValue().Set( blink::WebDOMFileSystem::Create( webframe, blink::kWebFileSystemTypeExternal, blink::WebString::FromUTF8(file_system_name), file_system_root) .CreateV8Entry(blink::WebString::FromUTF8(file_full_path), entry_type, args.Holder(), isolate)); #endif } void FileBrowserHandlerCustomBindings::GetExternalFileEntryCallback( const v8::FunctionCallbackInfo<v8::Value>& args) { GetExternalFileEntry(args, context()); } } // namespace extensions
null
null
null
null
51,752
17,103
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
182,098
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef __ASM_SH_FTRACE_H #define __ASM_SH_FTRACE_H #ifdef CONFIG_FUNCTION_TRACER #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ #define FTRACE_SYSCALL_MAX NR_syscalls #ifndef __ASSEMBLY__ extern void mcount(void); #define MCOUNT_ADDR ((unsigned long)(mcount)) #ifdef CONFIG_DYNAMIC_FTRACE #define CALL_ADDR ((long)(ftrace_call)) #define STUB_ADDR ((long)(ftrace_stub)) #define GRAPH_ADDR ((long)(ftrace_graph_call)) #define CALLER_ADDR ((long)(ftrace_caller)) #define MCOUNT_INSN_OFFSET ((STUB_ADDR - CALL_ADDR) - 4) #define GRAPH_INSN_OFFSET ((CALLER_ADDR - GRAPH_ADDR) - 4) struct dyn_arch_ftrace { /* No extra data needed on sh */ }; #endif /* CONFIG_DYNAMIC_FTRACE */ static inline unsigned long ftrace_call_adjust(unsigned long addr) { /* 'addr' is the memory table address. */ return addr; } #endif /* __ASSEMBLY__ */ #endif /* CONFIG_FUNCTION_TRACER */ #ifndef __ASSEMBLY__ /* arch/sh/kernel/return_address.c */ extern void *return_address(unsigned int); #define ftrace_return_address(n) return_address(n) #endif /* __ASSEMBLY__ */ #endif /* __ASM_SH_FTRACE_H */
null
null
null
null
90,445
43,512
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
43,512
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "base/message_loop/message_pump_glib.h" #include <glib.h> #include <math.h> #include <algorithm> #include <vector> #include "base/bind.h" #include "base/bind_helpers.h" #include "base/callback.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "base/message_loop/message_loop.h" #include "base/run_loop.h" #include "base/single_thread_task_runner.h" #include "base/threading/thread.h" #include "base/threading/thread_task_runner_handle.h" #include "testing/gtest/include/gtest/gtest.h" namespace base { namespace { // This class injects dummy "events" into the GLib loop. When "handled" these // events can run tasks. This is intended to mock gtk events (the corresponding // GLib source runs at the same priority). class EventInjector { public: EventInjector() : processed_events_(0) { source_ = static_cast<Source*>(g_source_new(&SourceFuncs, sizeof(Source))); source_->injector = this; g_source_attach(source_, nullptr); g_source_set_can_recurse(source_, TRUE); } ~EventInjector() { g_source_destroy(source_); g_source_unref(source_); } int HandlePrepare() { // If the queue is empty, block. if (events_.empty()) return -1; TimeDelta delta = events_[0].time - Time::NowFromSystemTime(); return std::max(0, static_cast<int>(ceil(delta.InMillisecondsF()))); } bool HandleCheck() { if (events_.empty()) return false; return events_[0].time <= Time::NowFromSystemTime(); } void HandleDispatch() { if (events_.empty()) return; Event event = std::move(events_[0]); events_.erase(events_.begin()); ++processed_events_; if (!event.callback.is_null()) std::move(event.callback).Run(); else if (!event.task.is_null()) std::move(event.task).Run(); } // Adds an event to the queue. When "handled", executes |callback|. // delay_ms is relative to the last event if any, or to Now() otherwise. 
void AddEvent(int delay_ms, OnceClosure callback) { AddEventHelper(delay_ms, std::move(callback), OnceClosure()); } void AddDummyEvent(int delay_ms) { AddEventHelper(delay_ms, OnceClosure(), OnceClosure()); } void AddEventAsTask(int delay_ms, OnceClosure task) { AddEventHelper(delay_ms, OnceClosure(), std::move(task)); } void Reset() { processed_events_ = 0; events_.clear(); } int processed_events() const { return processed_events_; } private: struct Event { Time time; OnceClosure callback; OnceClosure task; }; struct Source : public GSource { EventInjector* injector; }; void AddEventHelper(int delay_ms, OnceClosure callback, OnceClosure task) { Time last_time; if (!events_.empty()) last_time = (events_.end()-1)->time; else last_time = Time::NowFromSystemTime(); Time future = last_time + TimeDelta::FromMilliseconds(delay_ms); EventInjector::Event event = {future, std::move(callback), std::move(task)}; events_.push_back(std::move(event)); } static gboolean Prepare(GSource* source, gint* timeout_ms) { *timeout_ms = static_cast<Source*>(source)->injector->HandlePrepare(); return FALSE; } static gboolean Check(GSource* source) { return static_cast<Source*>(source)->injector->HandleCheck(); } static gboolean Dispatch(GSource* source, GSourceFunc unused_func, gpointer unused_data) { static_cast<Source*>(source)->injector->HandleDispatch(); return TRUE; } Source* source_; std::vector<Event> events_; int processed_events_; static GSourceFuncs SourceFuncs; DISALLOW_COPY_AND_ASSIGN(EventInjector); }; GSourceFuncs EventInjector::SourceFuncs = {EventInjector::Prepare, EventInjector::Check, EventInjector::Dispatch, nullptr}; void IncrementInt(int *value) { ++*value; } // Checks how many events have been processed by the injector. void ExpectProcessedEvents(EventInjector* injector, int count) { EXPECT_EQ(injector->processed_events(), count); } // Posts a task on the current message loop. void PostMessageLoopTask(const Location& from_here, OnceClosure task) { ThreadTaskRunnerHandle::Get()->PostTask(from_here, std::move(task)); } // Test fixture. class MessagePumpGLibTest : public testing::Test { public: MessagePumpGLibTest() : loop_(nullptr), injector_(nullptr) {} // Overridden from testing::Test: void SetUp() override { loop_ = new MessageLoop(MessageLoop::TYPE_UI); injector_ = new EventInjector(); } void TearDown() override { delete injector_; injector_ = nullptr; delete loop_; loop_ = nullptr; } MessageLoop* loop() const { return loop_; } EventInjector* injector() const { return injector_; } private: MessageLoop* loop_; EventInjector* injector_; DISALLOW_COPY_AND_ASSIGN(MessagePumpGLibTest); }; } // namespace TEST_F(MessagePumpGLibTest, TestQuit) { // Checks that Quit works and that the basic infrastructure is working. // Quit from a task RunLoop().RunUntilIdle(); EXPECT_EQ(0, injector()->processed_events()); injector()->Reset(); // Quit from an event injector()->AddEvent(0, MessageLoop::QuitWhenIdleClosure()); RunLoop().Run(); EXPECT_EQ(1, injector()->processed_events()); } TEST_F(MessagePumpGLibTest, TestEventTaskInterleave) { // Checks that tasks posted by events are executed before the next event if // the posted task queue is empty. // MessageLoop doesn't make strong guarantees that it is the case, but the // current implementation ensures it and the tests below rely on it. // If changes cause this test to fail, it is reasonable to change it, but // TestWorkWhileWaitingForEvents and TestEventsWhileWaitingForWork have to be // changed accordingly, otherwise they can become flaky. 
injector()->AddEventAsTask(0, DoNothing()); OnceClosure check_task = BindOnce(&ExpectProcessedEvents, Unretained(injector()), 2); OnceClosure posted_task = BindOnce(&PostMessageLoopTask, FROM_HERE, std::move(check_task)); injector()->AddEventAsTask(0, std::move(posted_task)); injector()->AddEventAsTask(0, DoNothing()); injector()->AddEvent(0, MessageLoop::QuitWhenIdleClosure()); RunLoop().Run(); EXPECT_EQ(4, injector()->processed_events()); injector()->Reset(); injector()->AddEventAsTask(0, DoNothing()); check_task = BindOnce(&ExpectProcessedEvents, Unretained(injector()), 2); posted_task = BindOnce(&PostMessageLoopTask, FROM_HERE, std::move(check_task)); injector()->AddEventAsTask(0, std::move(posted_task)); injector()->AddEventAsTask(10, DoNothing()); injector()->AddEvent(0, MessageLoop::QuitWhenIdleClosure()); RunLoop().Run(); EXPECT_EQ(4, injector()->processed_events()); } TEST_F(MessagePumpGLibTest, TestWorkWhileWaitingForEvents) { int task_count = 0; // Tests that we process tasks while waiting for new events. // The event queue is empty at first. for (int i = 0; i < 10; ++i) { loop()->task_runner()->PostTask(FROM_HERE, BindOnce(&IncrementInt, &task_count)); } // After all the previous tasks have executed, enqueue an event that will // quit. loop()->task_runner()->PostTask( FROM_HERE, BindOnce(&EventInjector::AddEvent, Unretained(injector()), 0, MessageLoop::QuitWhenIdleClosure())); RunLoop().Run(); ASSERT_EQ(10, task_count); EXPECT_EQ(1, injector()->processed_events()); // Tests that we process delayed tasks while waiting for new events. injector()->Reset(); task_count = 0; for (int i = 0; i < 10; ++i) { loop()->task_runner()->PostDelayedTask(FROM_HERE, BindOnce(&IncrementInt, &task_count), TimeDelta::FromMilliseconds(10 * i)); } // After all the previous tasks have executed, enqueue an event that will // quit. // This relies on the fact that delayed tasks are executed in delay order. // That is verified in message_loop_unittest.cc. loop()->task_runner()->PostDelayedTask( FROM_HERE, BindOnce(&EventInjector::AddEvent, Unretained(injector()), 10, MessageLoop::QuitWhenIdleClosure()), TimeDelta::FromMilliseconds(150)); RunLoop().Run(); ASSERT_EQ(10, task_count); EXPECT_EQ(1, injector()->processed_events()); } TEST_F(MessagePumpGLibTest, TestEventsWhileWaitingForWork) { // Tests that we process events while waiting for work. // The event queue is empty at first. for (int i = 0; i < 10; ++i) { injector()->AddDummyEvent(0); } // After all the events have been processed, post a task that will check that // the events have been processed (note: the task executes after the event // that posted it has been handled, so we expect 11 at that point). OnceClosure check_task = BindOnce(&ExpectProcessedEvents, Unretained(injector()), 11); OnceClosure posted_task = BindOnce(&PostMessageLoopTask, FROM_HERE, std::move(check_task)); injector()->AddEventAsTask(10, std::move(posted_task)); // And then quit (relies on the condition tested by TestEventTaskInterleave). injector()->AddEvent(10, MessageLoop::QuitWhenIdleClosure()); RunLoop().Run(); EXPECT_EQ(12, injector()->processed_events()); } namespace { // This class is a helper for the concurrent events / posted tasks test below. // It will quit the main loop once enough tasks and events have been processed, // while making sure there is always work to do and events in the queue. 
class ConcurrentHelper : public RefCounted<ConcurrentHelper> { public: explicit ConcurrentHelper(EventInjector* injector) : injector_(injector), event_count_(kStartingEventCount), task_count_(kStartingTaskCount) { } void FromTask() { if (task_count_ > 0) { --task_count_; } if (task_count_ == 0 && event_count_ == 0) { RunLoop::QuitCurrentWhenIdleDeprecated(); } else { ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, BindOnce(&ConcurrentHelper::FromTask, this)); } } void FromEvent() { if (event_count_ > 0) { --event_count_; } if (task_count_ == 0 && event_count_ == 0) { RunLoop::QuitCurrentWhenIdleDeprecated(); } else { injector_->AddEventAsTask(0, BindOnce(&ConcurrentHelper::FromEvent, this)); } } int event_count() const { return event_count_; } int task_count() const { return task_count_; } private: friend class RefCounted<ConcurrentHelper>; ~ConcurrentHelper() {} static const int kStartingEventCount = 20; static const int kStartingTaskCount = 20; EventInjector* injector_; int event_count_; int task_count_; }; } // namespace TEST_F(MessagePumpGLibTest, TestConcurrentEventPostedTask) { // Tests that posted tasks don't starve events, nor the opposite. // We use the helper class above. We keep both event and posted task queues // full, the helper verifies that both tasks and events get processed. // If that is not the case, either event_count_ or task_count_ will not get // to 0, and MessageLoop::QuitWhenIdle() will never be called. scoped_refptr<ConcurrentHelper> helper = new ConcurrentHelper(injector()); // Add 2 events to the queue to make sure it is always full (when we remove // the event before processing it). injector()->AddEventAsTask(0, BindOnce(&ConcurrentHelper::FromEvent, helper)); injector()->AddEventAsTask(0, BindOnce(&ConcurrentHelper::FromEvent, helper)); // Similarly post 2 tasks. loop()->task_runner()->PostTask( FROM_HERE, BindOnce(&ConcurrentHelper::FromTask, helper)); loop()->task_runner()->PostTask( FROM_HERE, BindOnce(&ConcurrentHelper::FromTask, helper)); RunLoop().Run(); EXPECT_EQ(0, helper->event_count()); EXPECT_EQ(0, helper->task_count()); } namespace { void AddEventsAndDrainGLib(EventInjector* injector) { // Add a couple of dummy events injector->AddDummyEvent(0); injector->AddDummyEvent(0); // Then add an event that will quit the main loop. injector->AddEvent(0, MessageLoop::QuitWhenIdleClosure()); // Post a couple of dummy tasks ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, DoNothing()); ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, DoNothing()); // Drain the events while (g_main_context_pending(nullptr)) { g_main_context_iteration(nullptr, FALSE); } } } // namespace TEST_F(MessagePumpGLibTest, TestDrainingGLib) { // Tests that draining events using GLib works. loop()->task_runner()->PostTask( FROM_HERE, BindOnce(&AddEventsAndDrainGLib, Unretained(injector()))); RunLoop().Run(); EXPECT_EQ(3, injector()->processed_events()); } namespace { // Helper class that lets us run the GLib message loop. class GLibLoopRunner : public RefCounted<GLibLoopRunner> { public: GLibLoopRunner() : quit_(false) { } void RunGLib() { while (!quit_) { g_main_context_iteration(nullptr, TRUE); } } void RunLoop() { while (!quit_) { g_main_context_iteration(nullptr, TRUE); } } void Quit() { quit_ = true; } void Reset() { quit_ = false; } private: friend class RefCounted<GLibLoopRunner>; ~GLibLoopRunner() {} bool quit_; }; void TestGLibLoopInternal(EventInjector* injector) { // Allow tasks to be processed from 'native' event loops. 
MessageLoop::current()->SetNestableTasksAllowed(true); scoped_refptr<GLibLoopRunner> runner = new GLibLoopRunner(); int task_count = 0; // Add a couple of dummy events injector->AddDummyEvent(0); injector->AddDummyEvent(0); // Post a couple of dummy tasks ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, BindOnce(&IncrementInt, &task_count)); ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, BindOnce(&IncrementInt, &task_count)); // Delayed events injector->AddDummyEvent(10); injector->AddDummyEvent(10); // Delayed work ThreadTaskRunnerHandle::Get()->PostDelayedTask( FROM_HERE, BindOnce(&IncrementInt, &task_count), TimeDelta::FromMilliseconds(30)); ThreadTaskRunnerHandle::Get()->PostDelayedTask( FROM_HERE, BindOnce(&GLibLoopRunner::Quit, runner), TimeDelta::FromMilliseconds(40)); // Run a nested, straight GLib message loop. runner->RunGLib(); ASSERT_EQ(3, task_count); EXPECT_EQ(4, injector->processed_events()); RunLoop::QuitCurrentWhenIdleDeprecated(); } void TestGtkLoopInternal(EventInjector* injector) { // Allow tasks to be processed from 'native' event loops. MessageLoop::current()->SetNestableTasksAllowed(true); scoped_refptr<GLibLoopRunner> runner = new GLibLoopRunner(); int task_count = 0; // Add a couple of dummy events injector->AddDummyEvent(0); injector->AddDummyEvent(0); // Post a couple of dummy tasks ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, BindOnce(&IncrementInt, &task_count)); ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, BindOnce(&IncrementInt, &task_count)); // Delayed events injector->AddDummyEvent(10); injector->AddDummyEvent(10); // Delayed work ThreadTaskRunnerHandle::Get()->PostDelayedTask( FROM_HERE, BindOnce(&IncrementInt, &task_count), TimeDelta::FromMilliseconds(30)); ThreadTaskRunnerHandle::Get()->PostDelayedTask( FROM_HERE, BindOnce(&GLibLoopRunner::Quit, runner), TimeDelta::FromMilliseconds(40)); // Run a nested, straight Gtk message loop. runner->RunLoop(); ASSERT_EQ(3, task_count); EXPECT_EQ(4, injector->processed_events()); RunLoop::QuitCurrentWhenIdleDeprecated(); } } // namespace TEST_F(MessagePumpGLibTest, TestGLibLoop) { // Tests that events and posted tasks are correctly executed if the message // loop is not run by MessageLoop::Run() but by a straight GLib loop. // Note that in this case we don't make strong guarantees about niceness // between events and posted tasks. loop()->task_runner()->PostTask( FROM_HERE, BindOnce(&TestGLibLoopInternal, Unretained(injector()))); RunLoop().Run(); } TEST_F(MessagePumpGLibTest, TestGtkLoop) { // Tests that events and posted tasks are correctly executed if the message // loop is not run by MessageLoop::Run() but by a straight Gtk loop. // Note that in this case we don't make strong guarantees about niceness // between events and posted tasks. loop()->task_runner()->PostTask( FROM_HERE, BindOnce(&TestGtkLoopInternal, Unretained(injector()))); RunLoop().Run(); } } // namespace base
null
null
null
null
40,375
327
null
train_val
a6802e21d824e786d1e2a8440cf749a6e1a8d95f
160,455
ImageMagick
0
https://github.com/ImageMagick/ImageMagick
2017-07-18 18:28:29-04:00
/* Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization dedicated to making software imaging solutions freely available. You may not use this file except in compliance with the License. obtain a copy of the License at https://www.imagemagick.org/script/license.php Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. MagickCore security policy methods. */ #ifndef MAGICKCORE_POLICY_H #define MAGICKCORE_POLICY_H #include "MagickCore/pixel.h" #include "MagickCore/exception.h" #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif typedef enum { UndefinedPolicyDomain, CoderPolicyDomain, DelegatePolicyDomain, FilterPolicyDomain, PathPolicyDomain, ResourcePolicyDomain, SystemPolicyDomain, CachePolicyDomain } PolicyDomain; typedef enum { UndefinedPolicyRights = 0x00, NoPolicyRights = 0x00, ReadPolicyRights = 0x01, WritePolicyRights = 0x02, ExecutePolicyRights = 0x04, AllPolicyRights = 0xff } PolicyRights; typedef struct _PolicyInfo PolicyInfo; extern MagickExport char *GetPolicyValue(const char *), **GetPolicyList(const char *,size_t *,ExceptionInfo *); extern MagickExport const PolicyInfo **GetPolicyInfoList(const char *,size_t *,ExceptionInfo *); extern MagickExport MagickBooleanType IsRightsAuthorized(const PolicyDomain,const PolicyRights,const char *), ListPolicyInfo(FILE *,ExceptionInfo *), SetMagickSecurityPolicy(const char *,ExceptionInfo *); #if defined(__cplusplus) || defined(c_plusplus) } #endif #endif
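Editor's illustrative sketch (not part of the header above): the header only declares the policy API, so a caller is expected to consult IsRightsAuthorized() before acting on a resource such as a path. The helper name, the plain fopen() fallback, and the studio.h include are assumptions made for this example, not ImageMagick code.

#include <stdio.h>

#include "MagickCore/studio.h"
#include "MagickCore/policy.h"

static FILE *OpenPolicyCheckedFile(const char *path)
{
  /*
    Refuse the open unless the security policy grants read rights on paths.
  */
  if (IsRightsAuthorized(PathPolicyDomain,ReadPolicyRights,path) == MagickFalse)
    return((FILE *) NULL);
  return(fopen(path,"rb"));
}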
null
null
null
null
72,748
43,324
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
43,324
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "base/mac/dispatch_source_mach.h" #include <mach/mach.h> #include <memory> #include "base/logging.h" #include "base/mac/scoped_mach_port.h" #include "base/test/test_timeouts.h" #include "testing/gtest/include/gtest/gtest.h" namespace base { class DispatchSourceMachTest : public testing::Test { public: void SetUp() override { mach_port_t port = MACH_PORT_NULL; ASSERT_EQ(KERN_SUCCESS, mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port)); receive_right_.reset(port); ASSERT_EQ(KERN_SUCCESS, mach_port_insert_right(mach_task_self(), port, port, MACH_MSG_TYPE_MAKE_SEND)); send_right_.reset(port); } mach_port_t GetPort() { return receive_right_.get(); } void WaitForSemaphore(dispatch_semaphore_t semaphore) { dispatch_semaphore_wait(semaphore, dispatch_time( DISPATCH_TIME_NOW, TestTimeouts::action_timeout().InSeconds() * NSEC_PER_SEC)); } private: base::mac::ScopedMachReceiveRight receive_right_; base::mac::ScopedMachSendRight send_right_; }; TEST_F(DispatchSourceMachTest, ReceiveAfterResume) { dispatch_semaphore_t signal = dispatch_semaphore_create(0); mach_port_t port = GetPort(); bool __block did_receive = false; DispatchSourceMach source("org.chromium.base.test.ReceiveAfterResume", port, ^{ mach_msg_empty_rcv_t msg = {{0}}; msg.header.msgh_size = sizeof(msg); msg.header.msgh_local_port = port; mach_msg_receive(&msg.header); did_receive = true; dispatch_semaphore_signal(signal); }); mach_msg_empty_send_t msg = {{0}}; msg.header.msgh_size = sizeof(msg); msg.header.msgh_remote_port = port; msg.header.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND); ASSERT_EQ(KERN_SUCCESS, mach_msg_send(&msg.header)); EXPECT_FALSE(did_receive); source.Resume(); WaitForSemaphore(signal); dispatch_release(signal); EXPECT_TRUE(did_receive); } TEST_F(DispatchSourceMachTest, NoMessagesAfterDestruction) { mach_port_t port = GetPort(); std::unique_ptr<int> count(new int(0)); int* __block count_ptr = count.get(); std::unique_ptr<DispatchSourceMach> source(new DispatchSourceMach( "org.chromium.base.test.NoMessagesAfterDestruction", port, ^{ mach_msg_empty_rcv_t msg = {{0}}; msg.header.msgh_size = sizeof(msg); msg.header.msgh_local_port = port; mach_msg_receive(&msg.header); LOG(INFO) << "Receieve " << *count_ptr; ++(*count_ptr); })); source->Resume(); dispatch_queue_t queue = dispatch_queue_create("org.chromium.base.test.MessageSend", NULL); dispatch_semaphore_t signal = dispatch_semaphore_create(0); for (int i = 0; i < 30; ++i) { dispatch_async(queue, ^{ mach_msg_empty_send_t msg = {{0}}; msg.header.msgh_size = sizeof(msg); msg.header.msgh_remote_port = port; msg.header.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND); mach_msg_send(&msg.header); }); // After sending five messages, shut down the source and taint the // pointer the handler dereferences. The test will crash if |count_ptr| // is being used after "free". if (i == 5) { std::unique_ptr<DispatchSourceMach>* source_ptr = &source; dispatch_async(queue, ^{ source_ptr->reset(); count_ptr = reinterpret_cast<int*>(0xdeaddead); dispatch_semaphore_signal(signal); }); } } WaitForSemaphore(signal); dispatch_release(signal); dispatch_release(queue); } } // namespace base
null
null
null
null
40,187
39,467
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
39,467
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "third_party/blink/renderer/modules/websockets/inspector_web_socket_events.h" #include <memory> #include "third_party/blink/renderer/core/dom/document.h" #include "third_party/blink/renderer/core/execution_context/execution_context.h" #include "third_party/blink/renderer/core/frame/local_frame.h" #include "third_party/blink/renderer/core/inspector/identifiers_factory.h" #include "third_party/blink/renderer/platform/weborigin/kurl.h" namespace blink { std::unique_ptr<TracedValue> InspectorWebSocketCreateEvent::Data( ExecutionContext* execution_context, unsigned long identifier, const KURL& url, const String& protocol) { std::unique_ptr<TracedValue> value = TracedValue::Create(); value->SetInteger("identifier", identifier); value->SetString("url", url.GetString()); if (execution_context->IsDocument()) { value->SetString("frame", IdentifiersFactory::FrameId( ToDocument(execution_context)->GetFrame())); } else { // TODO(nhiroki): Support WorkerGlobalScope (https://crbug.com/825740). NOTREACHED(); } if (!protocol.IsNull()) value->SetString("webSocketProtocol", protocol); SetCallStack(value.get()); return value; } std::unique_ptr<TracedValue> InspectorWebSocketEvent::Data( ExecutionContext* execution_context, unsigned long identifier) { std::unique_ptr<TracedValue> value = TracedValue::Create(); value->SetInteger("identifier", identifier); if (execution_context->IsDocument()) { value->SetString("frame", IdentifiersFactory::FrameId( ToDocument(execution_context)->GetFrame())); } else { // TODO(nhiroki): Support WorkerGlobalScope (https://crbug.com/825740). NOTREACHED(); } SetCallStack(value.get()); return value; } } // namespace blink
null
null
null
null
36,330
6,281
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
171,276
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (C) 2007, 2008 Karsten Wiese <fzu@wemgehoertderstaat.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/audio.h> #include <linux/module.h> #include <sound/core.h> #include <sound/hwdep.h> #include <sound/pcm.h> #include <sound/initval.h> #define MODNAME "US122L" #include "usb_stream.c" #include "../usbaudio.h" #include "../midi.h" #include "us122l.h" MODULE_AUTHOR("Karsten Wiese <fzu@wemgehoertderstaat.de>"); MODULE_DESCRIPTION("TASCAM "NAME_ALLCAPS" Version 0.5"); MODULE_LICENSE("GPL"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */ /* Enable this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for "NAME_ALLCAPS"."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for "NAME_ALLCAPS"."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable "NAME_ALLCAPS"."); static int snd_us122l_card_used[SNDRV_CARDS]; static int us122l_create_usbmidi(struct snd_card *card) { static struct snd_usb_midi_endpoint_info quirk_data = { .out_ep = 4, .in_ep = 3, .out_cables = 0x001, .in_cables = 0x001 }; static struct snd_usb_audio_quirk quirk = { .vendor_name = "US122L", .product_name = NAME_ALLCAPS, .ifnum = 1, .type = QUIRK_MIDI_US122L, .data = &quirk_data }; struct usb_device *dev = US122L(card)->dev; struct usb_interface *iface = usb_ifnum_to_if(dev, 1); return snd_usbmidi_create(card, iface, &US122L(card)->midi_list, &quirk); } static int us144_create_usbmidi(struct snd_card *card) { static struct snd_usb_midi_endpoint_info quirk_data = { .out_ep = 4, .in_ep = 3, .out_cables = 0x001, .in_cables = 0x001 }; static struct snd_usb_audio_quirk quirk = { .vendor_name = "US144", .product_name = NAME_ALLCAPS, .ifnum = 0, .type = QUIRK_MIDI_US122L, .data = &quirk_data }; struct usb_device *dev = US122L(card)->dev; struct usb_interface *iface = usb_ifnum_to_if(dev, 0); return snd_usbmidi_create(card, iface, &US122L(card)->midi_list, &quirk); } /* * Wrapper for usb_control_msg(). * Allocates a temp buffer to prevent dmaing from/to the stack. 
*/ static int us122l_ctl_msg(struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout) { int err; void *buf = NULL; if (size > 0) { buf = kmemdup(data, size, GFP_KERNEL); if (!buf) return -ENOMEM; } err = usb_control_msg(dev, pipe, request, requesttype, value, index, buf, size, timeout); if (size > 0) { memcpy(data, buf, size); kfree(buf); } return err; } static void pt_info_set(struct usb_device *dev, u8 v) { int ret; ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 'I', USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, v, 0, NULL, 0, 1000); snd_printdd(KERN_DEBUG "%i\n", ret); } static void usb_stream_hwdep_vm_open(struct vm_area_struct *area) { struct us122l *us122l = area->vm_private_data; atomic_inc(&us122l->mmap_count); snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count)); } static int usb_stream_hwdep_vm_fault(struct vm_fault *vmf) { unsigned long offset; struct page *page; void *vaddr; struct us122l *us122l = vmf->vma->vm_private_data; struct usb_stream *s; mutex_lock(&us122l->mutex); s = us122l->sk.s; if (!s) goto unlock; offset = vmf->pgoff << PAGE_SHIFT; if (offset < PAGE_ALIGN(s->read_size)) vaddr = (char *)s + offset; else { offset -= PAGE_ALIGN(s->read_size); if (offset >= PAGE_ALIGN(s->write_size)) goto unlock; vaddr = us122l->sk.write_page + offset; } page = virt_to_page(vaddr); get_page(page); mutex_unlock(&us122l->mutex); vmf->page = page; return 0; unlock: mutex_unlock(&us122l->mutex); return VM_FAULT_SIGBUS; } static void usb_stream_hwdep_vm_close(struct vm_area_struct *area) { struct us122l *us122l = area->vm_private_data; atomic_dec(&us122l->mmap_count); snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count)); } static const struct vm_operations_struct usb_stream_hwdep_vm_ops = { .open = usb_stream_hwdep_vm_open, .fault = usb_stream_hwdep_vm_fault, .close = usb_stream_hwdep_vm_close, }; static int usb_stream_hwdep_open(struct snd_hwdep *hw, struct file *file) { struct us122l *us122l = hw->private_data; struct usb_interface *iface; snd_printdd(KERN_DEBUG "%p %p\n", hw, file); if (hw->used >= 2) return -EBUSY; if (!us122l->first) us122l->first = file; if (us122l->dev->descriptor.idProduct == USB_ID_US144 || us122l->dev->descriptor.idProduct == USB_ID_US144MKII) { iface = usb_ifnum_to_if(us122l->dev, 0); usb_autopm_get_interface(iface); } iface = usb_ifnum_to_if(us122l->dev, 1); usb_autopm_get_interface(iface); return 0; } static int usb_stream_hwdep_release(struct snd_hwdep *hw, struct file *file) { struct us122l *us122l = hw->private_data; struct usb_interface *iface; snd_printdd(KERN_DEBUG "%p %p\n", hw, file); if (us122l->dev->descriptor.idProduct == USB_ID_US144 || us122l->dev->descriptor.idProduct == USB_ID_US144MKII) { iface = usb_ifnum_to_if(us122l->dev, 0); usb_autopm_put_interface(iface); } iface = usb_ifnum_to_if(us122l->dev, 1); usb_autopm_put_interface(iface); if (us122l->first == file) us122l->first = NULL; mutex_lock(&us122l->mutex); if (us122l->master == file) us122l->master = us122l->slave; us122l->slave = NULL; mutex_unlock(&us122l->mutex); return 0; } static int usb_stream_hwdep_mmap(struct snd_hwdep *hw, struct file *filp, struct vm_area_struct *area) { unsigned long size = area->vm_end - area->vm_start; struct us122l *us122l = hw->private_data; unsigned long offset; struct usb_stream *s; int err = 0; bool read; offset = area->vm_pgoff << PAGE_SHIFT; mutex_lock(&us122l->mutex); s = us122l->sk.s; read = offset < s->read_size; if (read && 
area->vm_flags & VM_WRITE) { err = -EPERM; goto out; } snd_printdd(KERN_DEBUG "%lu %u\n", size, read ? s->read_size : s->write_size); /* if userspace tries to mmap beyond end of our buffer, fail */ if (size > PAGE_ALIGN(read ? s->read_size : s->write_size)) { snd_printk(KERN_WARNING "%lu > %u\n", size, read ? s->read_size : s->write_size); err = -EINVAL; goto out; } area->vm_ops = &usb_stream_hwdep_vm_ops; area->vm_flags |= VM_DONTDUMP; if (!read) area->vm_flags |= VM_DONTEXPAND; area->vm_private_data = us122l; atomic_inc(&us122l->mmap_count); out: mutex_unlock(&us122l->mutex); return err; } static unsigned int usb_stream_hwdep_poll(struct snd_hwdep *hw, struct file *file, poll_table *wait) { struct us122l *us122l = hw->private_data; unsigned *polled; unsigned int mask; poll_wait(file, &us122l->sk.sleep, wait); mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR; if (mutex_trylock(&us122l->mutex)) { struct usb_stream *s = us122l->sk.s; if (s && s->state == usb_stream_ready) { if (us122l->first == file) polled = &s->periods_polled; else polled = &us122l->second_periods_polled; if (*polled != s->periods_done) { *polled = s->periods_done; mask = POLLIN | POLLOUT | POLLWRNORM; } else mask = 0; } mutex_unlock(&us122l->mutex); } return mask; } static void us122l_stop(struct us122l *us122l) { struct list_head *p; list_for_each(p, &us122l->midi_list) snd_usbmidi_input_stop(p); usb_stream_stop(&us122l->sk); usb_stream_free(&us122l->sk); } static int us122l_set_sample_rate(struct usb_device *dev, int rate) { unsigned int ep = 0x81; unsigned char data[3]; int err; data[0] = rate; data[1] = rate >> 8; data[2] = rate >> 16; err = us122l_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR, USB_TYPE_CLASS|USB_RECIP_ENDPOINT|USB_DIR_OUT, UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep, data, 3, 1000); if (err < 0) snd_printk(KERN_ERR "%d: cannot set freq %d to ep 0x%x\n", dev->devnum, rate, ep); return err; } static bool us122l_start(struct us122l *us122l, unsigned rate, unsigned period_frames) { struct list_head *p; int err; unsigned use_packsize = 0; bool success = false; if (us122l->dev->speed == USB_SPEED_HIGH) { /* The us-122l's descriptor defaults to iso max_packsize 78, which isn't needed for samplerates <= 48000. 
Lets save some memory: */ switch (rate) { case 44100: use_packsize = 36; break; case 48000: use_packsize = 42; break; case 88200: use_packsize = 72; break; } } if (!usb_stream_new(&us122l->sk, us122l->dev, 1, 2, rate, use_packsize, period_frames, 6)) goto out; err = us122l_set_sample_rate(us122l->dev, rate); if (err < 0) { us122l_stop(us122l); snd_printk(KERN_ERR "us122l_set_sample_rate error \n"); goto out; } err = usb_stream_start(&us122l->sk); if (err < 0) { us122l_stop(us122l); snd_printk(KERN_ERR "us122l_start error %i \n", err); goto out; } list_for_each(p, &us122l->midi_list) snd_usbmidi_input_start(p); success = true; out: return success; } static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigned cmd, unsigned long arg) { struct usb_stream_config *cfg; struct us122l *us122l = hw->private_data; struct usb_stream *s; unsigned min_period_frames; int err = 0; bool high_speed; if (cmd != SNDRV_USB_STREAM_IOCTL_SET_PARAMS) return -ENOTTY; cfg = memdup_user((void *)arg, sizeof(*cfg)); if (IS_ERR(cfg)) return PTR_ERR(cfg); if (cfg->version != USB_STREAM_INTERFACE_VERSION) { err = -ENXIO; goto free; } high_speed = us122l->dev->speed == USB_SPEED_HIGH; if ((cfg->sample_rate != 44100 && cfg->sample_rate != 48000 && (!high_speed || (cfg->sample_rate != 88200 && cfg->sample_rate != 96000))) || cfg->frame_size != 6 || cfg->period_frames > 0x3000) { err = -EINVAL; goto free; } switch (cfg->sample_rate) { case 44100: min_period_frames = 48; break; case 48000: min_period_frames = 52; break; default: min_period_frames = 104; break; } if (!high_speed) min_period_frames <<= 1; if (cfg->period_frames < min_period_frames) { err = -EINVAL; goto free; } snd_power_wait(hw->card, SNDRV_CTL_POWER_D0); mutex_lock(&us122l->mutex); s = us122l->sk.s; if (!us122l->master) us122l->master = file; else if (us122l->master != file) { if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) { err = -EIO; goto unlock; } us122l->slave = file; } if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) || s->state == usb_stream_xrun) { us122l_stop(us122l); if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames)) err = -EIO; else err = 1; } unlock: mutex_unlock(&us122l->mutex); free: kfree(cfg); wake_up_all(&us122l->sk.sleep); return err; } #define SND_USB_STREAM_ID "USB STREAM" static int usb_stream_hwdep_new(struct snd_card *card) { int err; struct snd_hwdep *hw; struct usb_device *dev = US122L(card)->dev; err = snd_hwdep_new(card, SND_USB_STREAM_ID, 0, &hw); if (err < 0) return err; hw->iface = SNDRV_HWDEP_IFACE_USB_STREAM; hw->private_data = US122L(card); hw->ops.open = usb_stream_hwdep_open; hw->ops.release = usb_stream_hwdep_release; hw->ops.ioctl = usb_stream_hwdep_ioctl; hw->ops.ioctl_compat = usb_stream_hwdep_ioctl; hw->ops.mmap = usb_stream_hwdep_mmap; hw->ops.poll = usb_stream_hwdep_poll; sprintf(hw->name, "/proc/bus/usb/%03d/%03d/hwdeppcm", dev->bus->busnum, dev->devnum); return 0; } static bool us122l_create_card(struct snd_card *card) { int err; struct us122l *us122l = US122L(card); if (us122l->dev->descriptor.idProduct == USB_ID_US144 || us122l->dev->descriptor.idProduct == USB_ID_US144MKII) { err = usb_set_interface(us122l->dev, 0, 1); if (err) { snd_printk(KERN_ERR "usb_set_interface error \n"); return false; } } err = usb_set_interface(us122l->dev, 1, 1); if (err) { snd_printk(KERN_ERR "usb_set_interface error \n"); return false; } pt_info_set(us122l->dev, 0x11); pt_info_set(us122l->dev, 0x10); if (!us122l_start(us122l, 44100, 256)) return false; if (us122l->dev->descriptor.idProduct == 
USB_ID_US144 || us122l->dev->descriptor.idProduct == USB_ID_US144MKII) err = us144_create_usbmidi(card); else err = us122l_create_usbmidi(card); if (err < 0) { snd_printk(KERN_ERR "us122l_create_usbmidi error %i \n", err); us122l_stop(us122l); return false; } err = usb_stream_hwdep_new(card); if (err < 0) { /* release the midi resources */ struct list_head *p; list_for_each(p, &us122l->midi_list) snd_usbmidi_disconnect(p); us122l_stop(us122l); return false; } return true; } static void snd_us122l_free(struct snd_card *card) { struct us122l *us122l = US122L(card); int index = us122l->card_index; if (index >= 0 && index < SNDRV_CARDS) snd_us122l_card_used[index] = 0; } static int usx2y_create_card(struct usb_device *device, struct usb_interface *intf, struct snd_card **cardp) { int dev; struct snd_card *card; int err; for (dev = 0; dev < SNDRV_CARDS; ++dev) if (enable[dev] && !snd_us122l_card_used[dev]) break; if (dev >= SNDRV_CARDS) return -ENODEV; err = snd_card_new(&intf->dev, index[dev], id[dev], THIS_MODULE, sizeof(struct us122l), &card); if (err < 0) return err; snd_us122l_card_used[US122L(card)->card_index = dev] = 1; card->private_free = snd_us122l_free; US122L(card)->dev = device; mutex_init(&US122L(card)->mutex); init_waitqueue_head(&US122L(card)->sk.sleep); INIT_LIST_HEAD(&US122L(card)->midi_list); strcpy(card->driver, "USB "NAME_ALLCAPS""); sprintf(card->shortname, "TASCAM "NAME_ALLCAPS""); sprintf(card->longname, "%s (%x:%x if %d at %03d/%03d)", card->shortname, le16_to_cpu(device->descriptor.idVendor), le16_to_cpu(device->descriptor.idProduct), 0, US122L(card)->dev->bus->busnum, US122L(card)->dev->devnum ); *cardp = card; return 0; } static int us122l_usb_probe(struct usb_interface *intf, const struct usb_device_id *device_id, struct snd_card **cardp) { struct usb_device *device = interface_to_usbdev(intf); struct snd_card *card; int err; err = usx2y_create_card(device, intf, &card); if (err < 0) return err; if (!us122l_create_card(card)) { snd_card_free(card); return -EINVAL; } err = snd_card_register(card); if (err < 0) { snd_card_free(card); return err; } usb_get_intf(usb_ifnum_to_if(device, 0)); usb_get_dev(device); *cardp = card; return 0; } static int snd_us122l_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *device = interface_to_usbdev(intf); struct snd_card *card; int err; if ((device->descriptor.idProduct == USB_ID_US144 || device->descriptor.idProduct == USB_ID_US144MKII) && device->speed == USB_SPEED_HIGH) { snd_printk(KERN_ERR "disable ehci-hcd to run US-144 \n"); return -ENODEV; } snd_printdd(KERN_DEBUG"%p:%i\n", intf, intf->cur_altsetting->desc.bInterfaceNumber); if (intf->cur_altsetting->desc.bInterfaceNumber != 1) return 0; err = us122l_usb_probe(usb_get_intf(intf), id, &card); if (err < 0) { usb_put_intf(intf); return err; } usb_set_intfdata(intf, card); return 0; } static void snd_us122l_disconnect(struct usb_interface *intf) { struct snd_card *card; struct us122l *us122l; struct list_head *p; card = usb_get_intfdata(intf); if (!card) return; snd_card_disconnect(card); us122l = US122L(card); mutex_lock(&us122l->mutex); us122l_stop(us122l); mutex_unlock(&us122l->mutex); /* release the midi resources */ list_for_each(p, &us122l->midi_list) { snd_usbmidi_disconnect(p); } usb_put_intf(usb_ifnum_to_if(us122l->dev, 0)); usb_put_intf(usb_ifnum_to_if(us122l->dev, 1)); usb_put_dev(us122l->dev); while (atomic_read(&us122l->mmap_count)) msleep(500); snd_card_free(card); } static int snd_us122l_suspend(struct usb_interface *intf, 
pm_message_t message) { struct snd_card *card; struct us122l *us122l; struct list_head *p; card = usb_get_intfdata(intf); if (!card) return 0; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); us122l = US122L(card); if (!us122l) return 0; list_for_each(p, &us122l->midi_list) snd_usbmidi_input_stop(p); mutex_lock(&us122l->mutex); usb_stream_stop(&us122l->sk); mutex_unlock(&us122l->mutex); return 0; } static int snd_us122l_resume(struct usb_interface *intf) { struct snd_card *card; struct us122l *us122l; struct list_head *p; int err; card = usb_get_intfdata(intf); if (!card) return 0; us122l = US122L(card); if (!us122l) return 0; mutex_lock(&us122l->mutex); /* needed, doesn't restart without: */ if (us122l->dev->descriptor.idProduct == USB_ID_US144 || us122l->dev->descriptor.idProduct == USB_ID_US144MKII) { err = usb_set_interface(us122l->dev, 0, 1); if (err) { snd_printk(KERN_ERR "usb_set_interface error \n"); goto unlock; } } err = usb_set_interface(us122l->dev, 1, 1); if (err) { snd_printk(KERN_ERR "usb_set_interface error \n"); goto unlock; } pt_info_set(us122l->dev, 0x11); pt_info_set(us122l->dev, 0x10); err = us122l_set_sample_rate(us122l->dev, us122l->sk.s->cfg.sample_rate); if (err < 0) { snd_printk(KERN_ERR "us122l_set_sample_rate error \n"); goto unlock; } err = usb_stream_start(&us122l->sk); if (err) goto unlock; list_for_each(p, &us122l->midi_list) snd_usbmidi_input_start(p); unlock: mutex_unlock(&us122l->mutex); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return err; } static struct usb_device_id snd_us122l_usb_id_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x0644, .idProduct = USB_ID_US122L }, { /* US-144 only works at USB1.1! Disable module ehci-hcd. */ .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x0644, .idProduct = USB_ID_US144 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x0644, .idProduct = USB_ID_US122MKII }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x0644, .idProduct = USB_ID_US144MKII }, { /* terminator */ } }; MODULE_DEVICE_TABLE(usb, snd_us122l_usb_id_table); static struct usb_driver snd_us122l_usb_driver = { .name = "snd-usb-us122l", .probe = snd_us122l_probe, .disconnect = snd_us122l_disconnect, .suspend = snd_us122l_suspend, .resume = snd_us122l_resume, .reset_resume = snd_us122l_resume, .id_table = snd_us122l_usb_id_table, .supports_autosuspend = 1 }; module_usb_driver(snd_us122l_usb_driver);
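The hwdep mmap handler above refuses any mapping that extends past the page-aligned end of the active stream buffer (read_size for the first opener, write_size otherwise). A minimal sketch of that validation pattern follows; the helper name and buffer argument are hypothetical and not part of the us122l driver (assumes <linux/mm.h> for PAGE_ALIGN and struct vm_area_struct).

/* Illustrative sketch only -- not part of the driver. */
static int check_mmap_len(const struct vm_area_struct *area, size_t buf_size)
{
	unsigned long requested = area->vm_end - area->vm_start;

	/* Reject mappings larger than the page-aligned buffer. */
	if (requested > PAGE_ALIGN(buf_size))
		return -EINVAL;
	return 0;
}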
null
null
null
null
79,623
23,731
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
188,726
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#define DSS_SUBSYS_NAME "HDMI"

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/of.h>
#include <video/omapfb_dss.h>

#include "hdmi.h"

int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep,
			struct hdmi_phy_data *phy)
{
	struct property *prop;
	int r, len;

	prop = of_find_property(ep, "lanes", &len);
	if (prop) {
		u32 lanes[8];

		if (len / sizeof(u32) != ARRAY_SIZE(lanes)) {
			dev_err(&pdev->dev, "bad number of lanes\n");
			return -EINVAL;
		}

		r = of_property_read_u32_array(ep, "lanes", lanes,
					       ARRAY_SIZE(lanes));
		if (r) {
			dev_err(&pdev->dev, "failed to read lane data\n");
			return r;
		}

		r = hdmi_phy_parse_lanes(phy, lanes);
		if (r) {
			dev_err(&pdev->dev, "failed to parse lane data\n");
			return r;
		}
	} else {
		static const u32 default_lanes[] = { 0, 1, 2, 3, 4, 5, 6, 7 };

		r = hdmi_phy_parse_lanes(phy, default_lanes);
		if (WARN_ON(r)) {
			dev_err(&pdev->dev, "failed to parse lane data\n");
			return r;
		}
	}

	return 0;
}

int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts)
{
	u32 deep_color;
	bool deep_color_correct = false;

	if (n == NULL || cts == NULL)
		return -EINVAL;

	/* TODO: When implemented, query deep color mode here. */
	deep_color = 100;

	/*
	 * When using deep color, the default N value (as in the HDMI
	 * specification) yields a non-integer CTS. Hence, we modify it
	 * while keeping the restrictions described in section 7.2.1 of
	 * the HDMI 1.4a specification.
	 */
	switch (sample_freq) {
	case 32000:
	case 48000:
	case 96000:
	case 192000:
		if (deep_color == 125)
			if (pclk == 27027000 || pclk == 74250000)
				deep_color_correct = true;
		if (deep_color == 150)
			if (pclk == 27027000)
				deep_color_correct = true;
		break;
	case 44100:
	case 88200:
	case 176400:
		if (deep_color == 125)
			if (pclk == 27027000)
				deep_color_correct = true;
		break;
	default:
		return -EINVAL;
	}

	if (deep_color_correct) {
		switch (sample_freq) {
		case 32000:
			*n = 8192;
			break;
		case 44100:
			*n = 12544;
			break;
		case 48000:
			*n = 8192;
			break;
		case 88200:
			*n = 25088;
			break;
		case 96000:
			*n = 16384;
			break;
		case 176400:
			*n = 50176;
			break;
		case 192000:
			*n = 32768;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (sample_freq) {
		case 32000:
			*n = 4096;
			break;
		case 44100:
			*n = 6272;
			break;
		case 48000:
			*n = 6144;
			break;
		case 88200:
			*n = 12544;
			break;
		case 96000:
			*n = 12288;
			break;
		case 176400:
			*n = 25088;
			break;
		case 192000:
			*n = 24576;
			break;
		default:
			return -EINVAL;
		}
	}

	/* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
	*cts = (pclk/1000) * (*n / 128) * deep_color / (sample_freq / 10);

	return 0;
}
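The N/CTS arithmetic at the end of hdmi_compute_acr() is easy to sanity-check by hand. The snippet below is illustrative only and not part of the driver (the local variable names are hypothetical): at a 74.25 MHz pixel clock with 48 kHz audio and no deep color (deep_color = 100), the table selects N = 6144 and the formula reduces to the familiar CTS value of 74250.

/*
 * Worked example:
 *   CTS = (74250000 / 1000) * (6144 / 128) * 100 / (48000 / 10)
 *       = 74250 * 48 * 100 / 4800
 *       = 74250
 */
u32 n, cts;
int err = hdmi_compute_acr(74250000, 48000, &n, &cts);
/* expect: err == 0, n == 6144, cts == 74250 */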
null
null
null
null
97,073
1,149
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
154,206
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/* * Microsoft Screen 3 (aka Microsoft ATC Screen) decoder * Copyright (c) 2012 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Microsoft Screen 3 (aka Microsoft ATC Screen) decoder */ #include "avcodec.h" #include "bytestream.h" #include "internal.h" #include "mathops.h" #include "mss34dsp.h" #define HEADER_SIZE 27 #define MODEL2_SCALE 13 #define MODEL_SCALE 15 #define MODEL256_SEC_SCALE 9 typedef struct Model2 { int upd_val, till_rescale; unsigned zero_freq, zero_weight; unsigned total_freq, total_weight; } Model2; typedef struct Model { int weights[16], freqs[16]; int num_syms; int tot_weight; int upd_val, max_upd_val, till_rescale; } Model; typedef struct Model256 { int weights[256], freqs[256]; int tot_weight; int secondary[68]; int sec_size; int upd_val, max_upd_val, till_rescale; } Model256; #define RAC_BOTTOM 0x01000000 typedef struct RangeCoder { const uint8_t *src, *src_end; uint32_t range, low; int got_error; } RangeCoder; enum BlockType { FILL_BLOCK = 0, IMAGE_BLOCK, DCT_BLOCK, HAAR_BLOCK, SKIP_BLOCK }; typedef struct BlockTypeContext { int last_type; Model bt_model[5]; } BlockTypeContext; typedef struct FillBlockCoder { int fill_val; Model coef_model; } FillBlockCoder; typedef struct ImageBlockCoder { Model256 esc_model, vec_entry_model; Model vec_size_model; Model vq_model[125]; } ImageBlockCoder; typedef struct DCTBlockCoder { int *prev_dc; ptrdiff_t prev_dc_stride; int prev_dc_height; int quality; uint16_t qmat[64]; Model dc_model; Model2 sign_model; Model256 ac_model; } DCTBlockCoder; typedef struct HaarBlockCoder { int quality, scale; Model256 coef_model; Model coef_hi_model; } HaarBlockCoder; typedef struct MSS3Context { AVCodecContext *avctx; AVFrame *pic; int got_error; RangeCoder coder; BlockTypeContext btype[3]; FillBlockCoder fill_coder[3]; ImageBlockCoder image_coder[3]; DCTBlockCoder dct_coder[3]; HaarBlockCoder haar_coder[3]; int dctblock[64]; int hblock[16 * 16]; } MSS3Context; static void model2_reset(Model2 *m) { m->zero_weight = 1; m->total_weight = 2; m->zero_freq = 0x1000; m->total_freq = 0x2000; m->upd_val = 4; m->till_rescale = 4; } static void model2_update(Model2 *m, int bit) { unsigned scale; if (!bit) m->zero_weight++; m->till_rescale--; if (m->till_rescale) return; m->total_weight += m->upd_val; if (m->total_weight > 0x2000) { m->total_weight = (m->total_weight + 1) >> 1; m->zero_weight = (m->zero_weight + 1) >> 1; if (m->total_weight == m->zero_weight) m->total_weight = m->zero_weight + 1; } m->upd_val = m->upd_val * 5 >> 2; if (m->upd_val > 64) m->upd_val = 64; scale = 0x80000000u / m->total_weight; m->zero_freq = m->zero_weight * scale >> 18; m->total_freq = m->total_weight * scale >> 18; m->till_rescale = m->upd_val; } static void model_update(Model *m, int val) { int i, sum = 0; unsigned scale; m->weights[val]++; 
m->till_rescale--; if (m->till_rescale) return; m->tot_weight += m->upd_val; if (m->tot_weight > 0x8000) { m->tot_weight = 0; for (i = 0; i < m->num_syms; i++) { m->weights[i] = (m->weights[i] + 1) >> 1; m->tot_weight += m->weights[i]; } } scale = 0x80000000u / m->tot_weight; for (i = 0; i < m->num_syms; i++) { m->freqs[i] = sum * scale >> 16; sum += m->weights[i]; } m->upd_val = m->upd_val * 5 >> 2; if (m->upd_val > m->max_upd_val) m->upd_val = m->max_upd_val; m->till_rescale = m->upd_val; } static void model_reset(Model *m) { int i; m->tot_weight = 0; for (i = 0; i < m->num_syms - 1; i++) m->weights[i] = 1; m->weights[m->num_syms - 1] = 0; m->upd_val = m->num_syms; m->till_rescale = 1; model_update(m, m->num_syms - 1); m->till_rescale = m->upd_val = (m->num_syms + 6) >> 1; } static av_cold void model_init(Model *m, int num_syms) { m->num_syms = num_syms; m->max_upd_val = 8 * num_syms + 48; model_reset(m); } static void model256_update(Model256 *m, int val) { int i, sum = 0; unsigned scale; int send, sidx = 1; m->weights[val]++; m->till_rescale--; if (m->till_rescale) return; m->tot_weight += m->upd_val; if (m->tot_weight > 0x8000) { m->tot_weight = 0; for (i = 0; i < 256; i++) { m->weights[i] = (m->weights[i] + 1) >> 1; m->tot_weight += m->weights[i]; } } scale = 0x80000000u / m->tot_weight; m->secondary[0] = 0; for (i = 0; i < 256; i++) { m->freqs[i] = sum * scale >> 16; sum += m->weights[i]; send = m->freqs[i] >> MODEL256_SEC_SCALE; while (sidx <= send) m->secondary[sidx++] = i - 1; } while (sidx < m->sec_size) m->secondary[sidx++] = 255; m->upd_val = m->upd_val * 5 >> 2; if (m->upd_val > m->max_upd_val) m->upd_val = m->max_upd_val; m->till_rescale = m->upd_val; } static void model256_reset(Model256 *m) { int i; for (i = 0; i < 255; i++) m->weights[i] = 1; m->weights[255] = 0; m->tot_weight = 0; m->upd_val = 256; m->till_rescale = 1; model256_update(m, 255); m->till_rescale = m->upd_val = (256 + 6) >> 1; } static av_cold void model256_init(Model256 *m) { m->max_upd_val = 8 * 256 + 48; m->sec_size = (1 << 6) + 2; model256_reset(m); } static void rac_init(RangeCoder *c, const uint8_t *src, int size) { int i; c->src = src; c->src_end = src + size; c->low = 0; for (i = 0; i < FFMIN(size, 4); i++) c->low = (c->low << 8) | *c->src++; c->range = 0xFFFFFFFF; c->got_error = 0; } static void rac_normalise(RangeCoder *c) { for (;;) { c->range <<= 8; c->low <<= 8; if (c->src < c->src_end) { c->low |= *c->src++; } else if (!c->low) { c->got_error = 1; c->low = 1; } if (c->range >= RAC_BOTTOM) return; } } static int rac_get_bit(RangeCoder *c) { int bit; c->range >>= 1; bit = (c->range <= c->low); if (bit) c->low -= c->range; if (c->range < RAC_BOTTOM) rac_normalise(c); return bit; } static int rac_get_bits(RangeCoder *c, int nbits) { int val; c->range >>= nbits; val = c->low / c->range; c->low -= c->range * val; if (c->range < RAC_BOTTOM) rac_normalise(c); return val; } static int rac_get_model2_sym(RangeCoder *c, Model2 *m) { int bit, helper; helper = m->zero_freq * (c->range >> MODEL2_SCALE); bit = (c->low >= helper); if (bit) { c->low -= helper; c->range -= helper; } else { c->range = helper; } if (c->range < RAC_BOTTOM) rac_normalise(c); model2_update(m, bit); return bit; } static int rac_get_model_sym(RangeCoder *c, Model *m) { int val; int end, end2; unsigned prob, prob2, helper; prob = 0; prob2 = c->range; c->range >>= MODEL_SCALE; val = 0; end = m->num_syms >> 1; end2 = m->num_syms; do { helper = m->freqs[end] * c->range; if (helper <= c->low) { val = end; prob = helper; } else { end2 = end; 
prob2 = helper; } end = (end2 + val) >> 1; } while (end != val); c->low -= prob; c->range = prob2 - prob; if (c->range < RAC_BOTTOM) rac_normalise(c); model_update(m, val); return val; } static int rac_get_model256_sym(RangeCoder *c, Model256 *m) { int val; int start, end; int ssym; unsigned prob, prob2, helper; prob2 = c->range; c->range >>= MODEL_SCALE; helper = c->low / c->range; ssym = helper >> MODEL256_SEC_SCALE; val = m->secondary[ssym]; end = start = m->secondary[ssym + 1] + 1; while (end > val + 1) { ssym = (end + val) >> 1; if (m->freqs[ssym] <= helper) { end = start; val = ssym; } else { end = (end + val) >> 1; start = ssym; } } prob = m->freqs[val] * c->range; if (val != 255) prob2 = m->freqs[val + 1] * c->range; c->low -= prob; c->range = prob2 - prob; if (c->range < RAC_BOTTOM) rac_normalise(c); model256_update(m, val); return val; } static int decode_block_type(RangeCoder *c, BlockTypeContext *bt) { bt->last_type = rac_get_model_sym(c, &bt->bt_model[bt->last_type]); return bt->last_type; } static int decode_coeff(RangeCoder *c, Model *m) { int val, sign; val = rac_get_model_sym(c, m); if (val) { sign = rac_get_bit(c); if (val > 1) { val--; val = (1 << val) + rac_get_bits(c, val); } if (!sign) val = -val; } return val; } static void decode_fill_block(RangeCoder *c, FillBlockCoder *fc, uint8_t *dst, ptrdiff_t stride, int block_size) { int i; fc->fill_val += decode_coeff(c, &fc->coef_model); for (i = 0; i < block_size; i++, dst += stride) memset(dst, fc->fill_val, block_size); } static void decode_image_block(RangeCoder *c, ImageBlockCoder *ic, uint8_t *dst, ptrdiff_t stride, int block_size) { int i, j; int vec_size; int vec[4]; int prev_line[16]; int A, B, C; vec_size = rac_get_model_sym(c, &ic->vec_size_model) + 2; for (i = 0; i < vec_size; i++) vec[i] = rac_get_model256_sym(c, &ic->vec_entry_model); for (; i < 4; i++) vec[i] = 0; memset(prev_line, 0, sizeof(prev_line)); for (j = 0; j < block_size; j++) { A = 0; B = 0; for (i = 0; i < block_size; i++) { C = B; B = prev_line[i]; A = rac_get_model_sym(c, &ic->vq_model[A + B * 5 + C * 25]); prev_line[i] = A; if (A < 4) dst[i] = vec[A]; else dst[i] = rac_get_model256_sym(c, &ic->esc_model); } dst += stride; } } static int decode_dct(RangeCoder *c, DCTBlockCoder *bc, int *block, int bx, int by) { int skip, val, sign, pos = 1, zz_pos, dc; int blk_pos = bx + by * bc->prev_dc_stride; memset(block, 0, sizeof(*block) * 64); dc = decode_coeff(c, &bc->dc_model); if (by) { if (bx) { int l, tl, t; l = bc->prev_dc[blk_pos - 1]; tl = bc->prev_dc[blk_pos - 1 - bc->prev_dc_stride]; t = bc->prev_dc[blk_pos - bc->prev_dc_stride]; if (FFABS(t - tl) <= FFABS(l - tl)) dc += l; else dc += t; } else { dc += bc->prev_dc[blk_pos - bc->prev_dc_stride]; } } else if (bx) { dc += bc->prev_dc[bx - 1]; } bc->prev_dc[blk_pos] = dc; block[0] = dc * bc->qmat[0]; while (pos < 64) { val = rac_get_model256_sym(c, &bc->ac_model); if (!val) return 0; if (val == 0xF0) { pos += 16; continue; } skip = val >> 4; val = val & 0xF; if (!val) return -1; pos += skip; if (pos >= 64) return -1; sign = rac_get_model2_sym(c, &bc->sign_model); if (val > 1) { val--; val = (1 << val) + rac_get_bits(c, val); } if (!sign) val = -val; zz_pos = ff_zigzag_direct[pos]; block[zz_pos] = val * bc->qmat[zz_pos]; pos++; } return pos == 64 ? 
0 : -1; } static void decode_dct_block(RangeCoder *c, DCTBlockCoder *bc, uint8_t *dst, ptrdiff_t stride, int block_size, int *block, int mb_x, int mb_y) { int i, j; int bx, by; int nblocks = block_size >> 3; bx = mb_x * nblocks; by = mb_y * nblocks; for (j = 0; j < nblocks; j++) { for (i = 0; i < nblocks; i++) { if (decode_dct(c, bc, block, bx + i, by + j)) { c->got_error = 1; return; } ff_mss34_dct_put(dst + i * 8, stride, block); } dst += 8 * stride; } } static void decode_haar_block(RangeCoder *c, HaarBlockCoder *hc, uint8_t *dst, ptrdiff_t stride, int block_size, int *block) { const int hsize = block_size >> 1; int A, B, C, D, t1, t2, t3, t4; int i, j; for (j = 0; j < block_size; j++) { for (i = 0; i < block_size; i++) { if (i < hsize && j < hsize) block[i] = rac_get_model256_sym(c, &hc->coef_model); else block[i] = decode_coeff(c, &hc->coef_hi_model); block[i] *= hc->scale; } block += block_size; } block -= block_size * block_size; for (j = 0; j < hsize; j++) { for (i = 0; i < hsize; i++) { A = block[i]; B = block[i + hsize]; C = block[i + hsize * block_size]; D = block[i + hsize * block_size + hsize]; t1 = A - B; t2 = C - D; t3 = A + B; t4 = C + D; dst[i * 2] = av_clip_uint8(t1 - t2); dst[i * 2 + stride] = av_clip_uint8(t1 + t2); dst[i * 2 + 1] = av_clip_uint8(t3 - t4); dst[i * 2 + 1 + stride] = av_clip_uint8(t3 + t4); } block += block_size; dst += stride * 2; } } static void reset_coders(MSS3Context *ctx, int quality) { int i, j; for (i = 0; i < 3; i++) { ctx->btype[i].last_type = SKIP_BLOCK; for (j = 0; j < 5; j++) model_reset(&ctx->btype[i].bt_model[j]); ctx->fill_coder[i].fill_val = 0; model_reset(&ctx->fill_coder[i].coef_model); model256_reset(&ctx->image_coder[i].esc_model); model256_reset(&ctx->image_coder[i].vec_entry_model); model_reset(&ctx->image_coder[i].vec_size_model); for (j = 0; j < 125; j++) model_reset(&ctx->image_coder[i].vq_model[j]); if (ctx->dct_coder[i].quality != quality) { ctx->dct_coder[i].quality = quality; ff_mss34_gen_quant_mat(ctx->dct_coder[i].qmat, quality, !i); } memset(ctx->dct_coder[i].prev_dc, 0, sizeof(*ctx->dct_coder[i].prev_dc) * ctx->dct_coder[i].prev_dc_stride * ctx->dct_coder[i].prev_dc_height); model_reset(&ctx->dct_coder[i].dc_model); model2_reset(&ctx->dct_coder[i].sign_model); model256_reset(&ctx->dct_coder[i].ac_model); if (ctx->haar_coder[i].quality != quality) { ctx->haar_coder[i].quality = quality; ctx->haar_coder[i].scale = 17 - 7 * quality / 50; } model_reset(&ctx->haar_coder[i].coef_hi_model); model256_reset(&ctx->haar_coder[i].coef_model); } } static av_cold void init_coders(MSS3Context *ctx) { int i, j; for (i = 0; i < 3; i++) { for (j = 0; j < 5; j++) model_init(&ctx->btype[i].bt_model[j], 5); model_init(&ctx->fill_coder[i].coef_model, 12); model256_init(&ctx->image_coder[i].esc_model); model256_init(&ctx->image_coder[i].vec_entry_model); model_init(&ctx->image_coder[i].vec_size_model, 3); for (j = 0; j < 125; j++) model_init(&ctx->image_coder[i].vq_model[j], 5); model_init(&ctx->dct_coder[i].dc_model, 12); model256_init(&ctx->dct_coder[i].ac_model); model_init(&ctx->haar_coder[i].coef_hi_model, 12); model256_init(&ctx->haar_coder[i].coef_model); } } static int mss3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MSS3Context *c = avctx->priv_data; RangeCoder *acoder = &c->coder; GetByteContext gb; uint8_t *dst[3]; int dec_width, dec_height, dec_x, dec_y, quality, keyframe; int x, y, i, mb_width, mb_height, blk_size, btype; int ret; 
if (buf_size < HEADER_SIZE) { av_log(avctx, AV_LOG_ERROR, "Frame should have at least %d bytes, got %d instead\n", HEADER_SIZE, buf_size); return AVERROR_INVALIDDATA; } bytestream2_init(&gb, buf, buf_size); keyframe = bytestream2_get_be32(&gb); if (keyframe & ~0x301) { av_log(avctx, AV_LOG_ERROR, "Invalid frame type %X\n", keyframe); return AVERROR_INVALIDDATA; } keyframe = !(keyframe & 1); bytestream2_skip(&gb, 6); dec_x = bytestream2_get_be16(&gb); dec_y = bytestream2_get_be16(&gb); dec_width = bytestream2_get_be16(&gb); dec_height = bytestream2_get_be16(&gb); if (dec_x + dec_width > avctx->width || dec_y + dec_height > avctx->height || (dec_width | dec_height) & 0xF) { av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d +%d,%d\n", dec_width, dec_height, dec_x, dec_y); return AVERROR_INVALIDDATA; } bytestream2_skip(&gb, 4); quality = bytestream2_get_byte(&gb); if (quality < 1 || quality > 100) { av_log(avctx, AV_LOG_ERROR, "Invalid quality setting %d\n", quality); return AVERROR_INVALIDDATA; } bytestream2_skip(&gb, 4); if (keyframe && !bytestream2_get_bytes_left(&gb)) { av_log(avctx, AV_LOG_ERROR, "Keyframe without data found\n"); return AVERROR_INVALIDDATA; } if (!keyframe && c->got_error) return buf_size; c->got_error = 0; if ((ret = ff_reget_buffer(avctx, c->pic)) < 0) return ret; c->pic->key_frame = keyframe; c->pic->pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; if (!bytestream2_get_bytes_left(&gb)) { if ((ret = av_frame_ref(data, c->pic)) < 0) return ret; *got_frame = 1; return buf_size; } reset_coders(c, quality); rac_init(acoder, buf + HEADER_SIZE, buf_size - HEADER_SIZE); mb_width = dec_width >> 4; mb_height = dec_height >> 4; dst[0] = c->pic->data[0] + dec_x + dec_y * c->pic->linesize[0]; dst[1] = c->pic->data[1] + dec_x / 2 + (dec_y / 2) * c->pic->linesize[1]; dst[2] = c->pic->data[2] + dec_x / 2 + (dec_y / 2) * c->pic->linesize[2]; for (y = 0; y < mb_height; y++) { for (x = 0; x < mb_width; x++) { for (i = 0; i < 3; i++) { blk_size = 8 << !i; btype = decode_block_type(acoder, c->btype + i); switch (btype) { case FILL_BLOCK: decode_fill_block(acoder, c->fill_coder + i, dst[i] + x * blk_size, c->pic->linesize[i], blk_size); break; case IMAGE_BLOCK: decode_image_block(acoder, c->image_coder + i, dst[i] + x * blk_size, c->pic->linesize[i], blk_size); break; case DCT_BLOCK: decode_dct_block(acoder, c->dct_coder + i, dst[i] + x * blk_size, c->pic->linesize[i], blk_size, c->dctblock, x, y); break; case HAAR_BLOCK: decode_haar_block(acoder, c->haar_coder + i, dst[i] + x * blk_size, c->pic->linesize[i], blk_size, c->hblock); break; } if (c->got_error || acoder->got_error) { av_log(avctx, AV_LOG_ERROR, "Error decoding block %d,%d\n", x, y); c->got_error = 1; return AVERROR_INVALIDDATA; } } } dst[0] += c->pic->linesize[0] * 16; dst[1] += c->pic->linesize[1] * 8; dst[2] += c->pic->linesize[2] * 8; } if ((ret = av_frame_ref(data, c->pic)) < 0) return ret; *got_frame = 1; return buf_size; } static av_cold int mss3_decode_end(AVCodecContext *avctx) { MSS3Context * const c = avctx->priv_data; int i; av_frame_free(&c->pic); for (i = 0; i < 3; i++) av_freep(&c->dct_coder[i].prev_dc); return 0; } static av_cold int mss3_decode_init(AVCodecContext *avctx) { MSS3Context * const c = avctx->priv_data; int i; c->avctx = avctx; if ((avctx->width & 0xF) || (avctx->height & 0xF)) { av_log(avctx, AV_LOG_ERROR, "Image dimensions should be a multiple of 16.\n"); return AVERROR_INVALIDDATA; } c->got_error = 0; for (i = 0; i < 3; i++) { int b_width = avctx->width >> (2 + !!i); int 
b_height = avctx->height >> (2 + !!i); c->dct_coder[i].prev_dc_stride = b_width; c->dct_coder[i].prev_dc_height = b_height; c->dct_coder[i].prev_dc = av_malloc(sizeof(*c->dct_coder[i].prev_dc) * b_width * b_height); if (!c->dct_coder[i].prev_dc) { av_log(avctx, AV_LOG_ERROR, "Cannot allocate buffer\n"); av_frame_free(&c->pic); while (i >= 0) { av_freep(&c->dct_coder[i].prev_dc); i--; } return AVERROR(ENOMEM); } } c->pic = av_frame_alloc(); if (!c->pic) { mss3_decode_end(avctx); return AVERROR(ENOMEM); } avctx->pix_fmt = AV_PIX_FMT_YUV420P; init_coders(c); return 0; } AVCodec ff_msa1_decoder = { .name = "msa1", .long_name = NULL_IF_CONFIG_SMALL("MS ATC Screen"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_MSA1, .priv_data_size = sizeof(MSS3Context), .init = mss3_decode_init, .close = mss3_decode_end, .decode = mss3_decode_frame, .capabilities = AV_CODEC_CAP_DR1, };
null
null
null
null
70,261
1,066
null
train_val
83ed75feba32e46f736fcce0d96a0445f29b96c2
162,910
krb5
0
https://github.com/krb5/krb5
2016-01-27 15:43:28-05:00
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* lib/krb5/os/dnsglue.c */ /* * Copyright 2004, 2009 by the Massachusetts Institute of Technology. * All Rights Reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. */ #include "autoconf.h" #ifdef KRB5_DNS_LOOKUP #include "dnsglue.h" #ifdef __APPLE__ #include <dns.h> #endif /* * Only use res_ninit() if there's also a res_ndestroy(), to avoid * memory leaks (Linux & Solaris) and outright corruption (AIX 4.x, * 5.x). While we're at it, make sure res_nsearch() is there too. * * In any case, it is probable that platforms having broken * res_ninit() will have thread safety hacks for res_init() and _res. */ /* * Opaque handle */ struct krb5int_dns_state { int nclass; int ntype; void *ansp; int anslen; int ansmax; #if HAVE_NS_INITPARSE int cur_ans; ns_msg msg; #else unsigned char *ptr; unsigned short nanswers; #endif }; #if !HAVE_NS_INITPARSE static int initparse(struct krb5int_dns_state *); #endif /* * Define macros to use the best available DNS search functions. INIT_HANDLE() * returns true if handle initialization is successful, false if it is not. * SEARCH() returns the length of the response or -1 on error. * DECLARE_HANDLE() must be used last in the declaration list since it may * evaluate to nothing. */ #if defined(__APPLE__) /* Use the OS X interfaces dns_open, dns_search, and dns_free. */ #define DECLARE_HANDLE(h) dns_handle_t h #define INIT_HANDLE(h) ((h = dns_open(NULL)) != NULL) #define SEARCH(h, n, c, t, a, l) dns_search(h, n, c, t, a, l, NULL, NULL) #define DESTROY_HANDLE(h) dns_free(h) #elif HAVE_RES_NINIT && HAVE_RES_NSEARCH /* Use res_ninit, res_nsearch, and res_ndestroy or res_nclose. */ #define DECLARE_HANDLE(h) struct __res_state h #define INIT_HANDLE(h) (memset(&h, 0, sizeof(h)), res_ninit(&h) == 0) #define SEARCH(h, n, c, t, a, l) res_nsearch(&h, n, c, t, a, l) #if HAVE_RES_NDESTROY #define DESTROY_HANDLE(h) res_ndestroy(&h) #else #define DESTROY_HANDLE(h) res_nclose(&h) #endif #else /* Use res_init and res_search. */ #define DECLARE_HANDLE(h) #define INIT_HANDLE(h) (res_init() == 0) #define SEARCH(h, n, c, t, a, l) res_search(n, c, t, a, l) #define DESTROY_HANDLE(h) #endif /* * krb5int_dns_init() * * Initialize an opaque handle. Do name lookup and initial parsing of * reply, skipping question section. Prepare to iterate over answer * section. Returns -1 on error, 0 on success. 
*/ int krb5int_dns_init(struct krb5int_dns_state **dsp, char *host, int nclass, int ntype) { struct krb5int_dns_state *ds; int len, ret; size_t nextincr, maxincr; unsigned char *p; DECLARE_HANDLE(h); *dsp = ds = malloc(sizeof(*ds)); if (ds == NULL) return -1; ret = -1; ds->nclass = nclass; ds->ntype = ntype; ds->ansp = NULL; ds->anslen = 0; ds->ansmax = 0; nextincr = 2048; maxincr = INT_MAX; #if HAVE_NS_INITPARSE ds->cur_ans = 0; #endif if (!INIT_HANDLE(h)) return -1; do { p = (ds->ansp == NULL) ? malloc(nextincr) : realloc(ds->ansp, nextincr); if (p == NULL) { ret = -1; goto errout; } ds->ansp = p; ds->ansmax = nextincr; len = SEARCH(h, host, ds->nclass, ds->ntype, ds->ansp, ds->ansmax); if ((size_t) len > maxincr) { ret = -1; goto errout; } while (nextincr < (size_t) len) nextincr *= 2; if (len < 0 || nextincr > maxincr) { ret = -1; goto errout; } } while (len > ds->ansmax); ds->anslen = len; #if HAVE_NS_INITPARSE ret = ns_initparse(ds->ansp, ds->anslen, &ds->msg); #else ret = initparse(ds); #endif if (ret < 0) goto errout; ret = 0; errout: DESTROY_HANDLE(h); if (ret < 0) { if (ds->ansp != NULL) { free(ds->ansp); ds->ansp = NULL; } } return ret; } #if HAVE_NS_INITPARSE /* * krb5int_dns_nextans - get next matching answer record * * Sets pp to NULL if no more records. Returns -1 on error, 0 on * success. */ int krb5int_dns_nextans(struct krb5int_dns_state *ds, const unsigned char **pp, int *lenp) { int len; ns_rr rr; *pp = NULL; *lenp = 0; while (ds->cur_ans < ns_msg_count(ds->msg, ns_s_an)) { len = ns_parserr(&ds->msg, ns_s_an, ds->cur_ans, &rr); if (len < 0) return -1; ds->cur_ans++; if (ds->nclass == (int)ns_rr_class(rr) && ds->ntype == (int)ns_rr_type(rr)) { *pp = ns_rr_rdata(rr); *lenp = ns_rr_rdlen(rr); return 0; } } return 0; } #endif /* * krb5int_dns_expand - wrapper for dn_expand() */ int krb5int_dns_expand(struct krb5int_dns_state *ds, const unsigned char *p, char *buf, int len) { #if HAVE_NS_NAME_UNCOMPRESS return ns_name_uncompress(ds->ansp, (unsigned char *)ds->ansp + ds->anslen, p, buf, (size_t)len); #else return dn_expand(ds->ansp, (unsigned char *)ds->ansp + ds->anslen, p, buf, len); #endif } /* * Free stuff. */ void krb5int_dns_fini(struct krb5int_dns_state *ds) { if (ds == NULL) return; if (ds->ansp != NULL) free(ds->ansp); free(ds); } /* * Compat routines for BIND 4 */ #if !HAVE_NS_INITPARSE /* * initparse * * Skip header and question section of reply. Set a pointer to the * beginning of the answer section, and prepare to iterate over * answer records. */ static int initparse(struct krb5int_dns_state *ds) { HEADER *hdr; unsigned char *p; unsigned short nqueries, nanswers; int len; #if !HAVE_DN_SKIPNAME char host[MAXDNAME]; #endif if ((size_t) ds->anslen < sizeof(HEADER)) return -1; hdr = (HEADER *)ds->ansp; p = ds->ansp; nqueries = ntohs((unsigned short)hdr->qdcount); nanswers = ntohs((unsigned short)hdr->ancount); p += sizeof(HEADER); /* * Skip query records. */ while (nqueries--) { #if HAVE_DN_SKIPNAME len = dn_skipname(p, (unsigned char *)ds->ansp + ds->anslen); #else len = dn_expand(ds->ansp, (unsigned char *)ds->ansp + ds->anslen, p, host, sizeof(host)); #endif if (len < 0 || !INCR_OK(ds->ansp, ds->anslen, p, len + 4)) return -1; p += len + 4; } ds->ptr = p; ds->nanswers = nanswers; return 0; } /* * krb5int_dns_nextans() - get next answer record * * Sets pp to NULL if no more records. 
*/ int krb5int_dns_nextans(struct krb5int_dns_state *ds, const unsigned char **pp, int *lenp) { int len; unsigned char *p; unsigned short ntype, nclass, rdlen; #if !HAVE_DN_SKIPNAME char host[MAXDNAME]; #endif *pp = NULL; *lenp = 0; p = ds->ptr; while (ds->nanswers--) { #if HAVE_DN_SKIPNAME len = dn_skipname(p, (unsigned char *)ds->ansp + ds->anslen); #else len = dn_expand(ds->ansp, (unsigned char *)ds->ansp + ds->anslen, p, host, sizeof(host)); #endif if (len < 0 || !INCR_OK(ds->ansp, ds->anslen, p, len)) return -1; p += len; SAFE_GETUINT16(ds->ansp, ds->anslen, p, 2, ntype, out); /* Also skip 4 bytes of TTL */ SAFE_GETUINT16(ds->ansp, ds->anslen, p, 6, nclass, out); SAFE_GETUINT16(ds->ansp, ds->anslen, p, 2, rdlen, out); if (!INCR_OK(ds->ansp, ds->anslen, p, rdlen)) return -1; if (rdlen > INT_MAX) return -1; if (nclass == ds->nclass && ntype == ds->ntype) { *pp = p; *lenp = rdlen; ds->ptr = p + rdlen; return 0; } p += rdlen; } return 0; out: return -1; } #endif /* * Try to look up a TXT record pointing to a Kerberos realm */ krb5_error_code k5_try_realm_txt_rr(krb5_context context, const char *prefix, const char *name, char **realm) { krb5_error_code retval = KRB5_ERR_HOST_REALM_UNKNOWN; const unsigned char *p, *base; char host[MAXDNAME]; int ret, rdlen, len; struct krb5int_dns_state *ds = NULL; struct k5buf buf; /* * Form our query, and send it via DNS */ k5_buf_init_fixed(&buf, host, sizeof(host)); if (name == NULL || name[0] == '\0') { k5_buf_add(&buf, prefix); } else { k5_buf_add_fmt(&buf, "%s.%s", prefix, name); /* Realm names don't (normally) end with ".", but if the query doesn't end with "." and doesn't get an answer as is, the resolv code will try appending the local domain. Since the realm names are absolutes, let's stop that. But only if a name has been specified. If we are performing a search on the prefix alone then the intention is to allow the local domain or domain search lists to be expanded. */ if (buf.len > 0 && host[buf.len - 1] != '.') k5_buf_add(&buf, "."); } if (k5_buf_status(&buf) != 0) return KRB5_ERR_HOST_REALM_UNKNOWN; ret = krb5int_dns_init(&ds, host, C_IN, T_TXT); if (ret < 0) { TRACE_TXT_LOOKUP_NOTFOUND(context, host); goto errout; } ret = krb5int_dns_nextans(ds, &base, &rdlen); if (ret < 0 || base == NULL) goto errout; p = base; if (!INCR_OK(base, rdlen, p, 1)) goto errout; len = *p++; *realm = malloc((size_t)len + 1); if (*realm == NULL) { retval = ENOMEM; goto errout; } strncpy(*realm, (const char *)p, (size_t)len); (*realm)[len] = '\0'; /* Avoid a common error. */ if ( (*realm)[len-1] == '.' ) (*realm)[len-1] = '\0'; retval = 0; TRACE_TXT_LOOKUP_SUCCESS(context, host, *realm); errout: if (ds != NULL) { krb5int_dns_fini(ds); ds = NULL; } return retval; } #endif /* KRB5_DNS_LOOKUP */
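k5_try_realm_txt_rr() above consumes the answer's RDATA directly: a DNS TXT record's RDATA is one or more character-strings, each a one-byte length followed by that many bytes, and only the first string is used as the realm name. A minimal sketch of that parse is shown below; the helper name is hypothetical and not part of the krb5 API (assumes <string.h> for memcpy).

/* Illustrative only: copy the first character-string out of TXT RDATA. */
static int first_txt_string(const unsigned char *rdata, int rdlen,
                            char *out, size_t outlen)
{
    int len;

    if (rdlen < 1)
        return -1;
    len = rdata[0];                 /* one-byte length prefix */
    if (len + 1 > rdlen || (size_t)len + 1 > outlen)
        return -1;
    memcpy(out, rdata + 1, len);    /* e.g. "\x0bEXAMPLE.COM" -> "EXAMPLE.COM" */
    out[len] = '\0';
    return len;
}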
null
null
null
null
74,218
18,970
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
18,970
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_SERVICES_FONT_PUBLIC_CPP_FONT_SERVICE_THREAD_H_ #define COMPONENTS_SERVICES_FONT_PUBLIC_CPP_FONT_SERVICE_THREAD_H_ #include <stdint.h> #include <set> #include "base/files/file.h" #include "base/macros.h" #include "base/memory/weak_ptr.h" #include "base/threading/thread.h" #include "components/services/font/public/interfaces/font_service.mojom.h" #include "third_party/skia/include/core/SkStream.h" #include "third_party/skia/include/core/SkTypeface.h" #include "third_party/skia/include/ports/SkFontConfigInterface.h" namespace font_service { namespace internal { class MappedFontFile; // The thread which services font requests. // // The SkFontConfigInterface is a global singleton which can be accessed from // multiple threads. However, mojo pipes are bound to a single thread. Because // of this mismatch, we create a thread which owns the mojo pipe, sends and // receives messages. The multiple threads which call through FontLoader class // do blocking message calls to this thread. class FontServiceThread : public base::Thread, public base::RefCountedThreadSafe<FontServiceThread> { public: explicit FontServiceThread(mojom::FontServicePtr font_service); // These methods are proxies which run on your thread, post a blocking task // to the FontServiceThread, and wait on an event signaled from the callback. bool MatchFamilyName(const char family_name[], SkFontStyle requested_style, SkFontConfigInterface::FontIdentity* out_font_identity, SkString* out_family_name, SkFontStyle* out_style); scoped_refptr<MappedFontFile> OpenStream( const SkFontConfigInterface::FontIdentity& identity); private: friend class base::RefCountedThreadSafe<FontServiceThread>; ~FontServiceThread() override; // Methods which run on the FontServiceThread. The public MatchFamilyName // calls this method, this method calls the mojo interface, and sets up the // callback to OnMatchFamilyNameComplete. void MatchFamilyNameImpl( base::WaitableEvent* done_event, const char family_name[], SkFontStyle requested_style, bool* out_valid, SkFontConfigInterface::FontIdentity* out_font_identity, SkString* out_family_name, SkFontStyle* out_style); // Called on the FontServiceThread in response to receiving a message from // our MatchFamily mojo IPC. This writes the data returned by mojo, and then // signals |done_event| to wake up the other thread. void OnMatchFamilyNameComplete( base::WaitableEvent* done_event, bool* out_valid, SkFontConfigInterface::FontIdentity* out_font_identity, SkString* out_family_name, SkFontStyle* out_style, mojom::FontIdentityPtr font_identity, const std::string& family_name, mojom::TypefaceStylePtr style); // Implementation of OpenStream; same threading restrictions as MatchFamily. void OpenStreamImpl(base::WaitableEvent* done_event, base::File* output_file, const uint32_t id_number); void OnOpenStreamComplete(base::WaitableEvent* done_event, base::File* output_file, base::File file); // Connection to |font_service_| has gone away. Called on the background // thread. void OnFontServiceConnectionError(); // base::Thread void Init() override; void CleanUp() override; // This member is used to safely pass data from one thread to another. It is // set in the constructor and is consumed in Init(). mojo::InterfacePtrInfo<mojom::FontService> font_service_info_; // This member is set in Init(). 
It takes |font_service_info_|, which is // non-thread bound, and binds it to the newly created thread. mojo::InterfacePtr<mojom::FontService> font_service_; // All WaitableEvents supplied to OpenStreamImpl() are added here while // waiting on the response from the |font_service_| (FontService::OpenStream() // was called, but the callback has not been processed yet). If // |font_service_| gets an error during this time all events in // |pending_waitable_events_| are signaled. This is necessary as when the // pipe is closed the callbacks are never received. std::set<base::WaitableEvent*> pending_waitable_events_; base::WeakPtrFactory<FontServiceThread> weak_factory_; DISALLOW_COPY_AND_ASSIGN(FontServiceThread); }; } // namespace internal } // namespace font_service #endif // COMPONENTS_SERVICES_FONT_PUBLIC_CPP_FONT_SERVICE_THREAD_H_
null
null
null
null
15,833
2,919
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
155,976
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/* * Silicon Graphics Movie demuxer * Copyright (c) 2012 Peter Ross * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Silicon Graphics Movie demuxer */ #include "libavutil/channel_layout.h" #include "libavutil/eval.h" #include "libavutil/intreadwrite.h" #include "libavutil/rational.h" #include "avformat.h" #include "internal.h" typedef struct MvContext { int nb_video_tracks; int nb_audio_tracks; int eof_count; ///< number of streams that have finished int stream_index; ///< current stream index int frame[2]; ///< frame nb for current stream int acompression; ///< compression level for audio stream int aformat; ///< audio format } MvContext; #define AUDIO_FORMAT_SIGNED 401 static int mv_probe(AVProbeData *p) { if (AV_RB32(p->buf) == MKBETAG('M', 'O', 'V', 'I') && AV_RB16(p->buf + 4) < 3) return AVPROBE_SCORE_MAX; return 0; } static char *var_read_string(AVIOContext *pb, int size) { int n; char *str; if (size < 0 || size == INT_MAX) return NULL; str = av_malloc(size + 1); if (!str) return NULL; n = avio_get_str(pb, size, str, size + 1); if (n < size) avio_skip(pb, size - n); return str; } static int var_read_int(AVIOContext *pb, int size) { int v; char *s = var_read_string(pb, size); if (!s) return 0; v = strtol(s, NULL, 10); av_free(s); return v; } static AVRational var_read_float(AVIOContext *pb, int size) { AVRational v; char *s = var_read_string(pb, size); if (!s) return (AVRational) { 0, 0 }; v = av_d2q(av_strtod(s, NULL), INT_MAX); av_free(s); return v; } static void var_read_metadata(AVFormatContext *avctx, const char *tag, int size) { char *value = var_read_string(avctx->pb, size); if (value) av_dict_set(&avctx->metadata, tag, value, AV_DICT_DONT_STRDUP_VAL); } static int set_channels(AVFormatContext *avctx, AVStream *st, int channels) { if (channels <= 0) { av_log(avctx, AV_LOG_ERROR, "Channel count %d invalid.\n", channels); return AVERROR_INVALIDDATA; } st->codecpar->channels = channels; st->codecpar->channel_layout = (st->codecpar->channels == 1) ? 
AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO; return 0; } /** * Parse global variable * @return < 0 if unknown */ static int parse_global_var(AVFormatContext *avctx, AVStream *st, const char *name, int size) { MvContext *mv = avctx->priv_data; AVIOContext *pb = avctx->pb; if (!strcmp(name, "__NUM_I_TRACKS")) { mv->nb_video_tracks = var_read_int(pb, size); } else if (!strcmp(name, "__NUM_A_TRACKS")) { mv->nb_audio_tracks = var_read_int(pb, size); } else if (!strcmp(name, "COMMENT") || !strcmp(name, "TITLE")) { var_read_metadata(avctx, name, size); } else if (!strcmp(name, "LOOP_MODE") || !strcmp(name, "NUM_LOOPS") || !strcmp(name, "OPTIMIZED")) { avio_skip(pb, size); // ignore } else return AVERROR_INVALIDDATA; return 0; } /** * Parse audio variable * @return < 0 if unknown */ static int parse_audio_var(AVFormatContext *avctx, AVStream *st, const char *name, int size) { MvContext *mv = avctx->priv_data; AVIOContext *pb = avctx->pb; if (!strcmp(name, "__DIR_COUNT")) { st->nb_frames = var_read_int(pb, size); } else if (!strcmp(name, "AUDIO_FORMAT")) { mv->aformat = var_read_int(pb, size); } else if (!strcmp(name, "COMPRESSION")) { mv->acompression = var_read_int(pb, size); } else if (!strcmp(name, "DEFAULT_VOL")) { var_read_metadata(avctx, name, size); } else if (!strcmp(name, "NUM_CHANNELS")) { return set_channels(avctx, st, var_read_int(pb, size)); } else if (!strcmp(name, "SAMPLE_RATE")) { st->codecpar->sample_rate = var_read_int(pb, size); avpriv_set_pts_info(st, 33, 1, st->codecpar->sample_rate); } else if (!strcmp(name, "SAMPLE_WIDTH")) { st->codecpar->bits_per_coded_sample = var_read_int(pb, size) * 8; } else return AVERROR_INVALIDDATA; return 0; } /** * Parse video variable * @return < 0 if unknown */ static int parse_video_var(AVFormatContext *avctx, AVStream *st, const char *name, int size) { AVIOContext *pb = avctx->pb; if (!strcmp(name, "__DIR_COUNT")) { st->nb_frames = st->duration = var_read_int(pb, size); } else if (!strcmp(name, "COMPRESSION")) { char *str = var_read_string(pb, size); if (!str) return AVERROR_INVALIDDATA; if (!strcmp(str, "1")) { st->codecpar->codec_id = AV_CODEC_ID_MVC1; } else if (!strcmp(str, "2")) { st->codecpar->format = AV_PIX_FMT_ABGR; st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO; } else if (!strcmp(str, "3")) { st->codecpar->codec_id = AV_CODEC_ID_SGIRLE; } else if (!strcmp(str, "10")) { st->codecpar->codec_id = AV_CODEC_ID_MJPEG; } else if (!strcmp(str, "MVC2")) { st->codecpar->codec_id = AV_CODEC_ID_MVC2; } else { avpriv_request_sample(avctx, "Video compression %s", str); } av_free(str); } else if (!strcmp(name, "FPS")) { AVRational fps = var_read_float(pb, size); avpriv_set_pts_info(st, 64, fps.den, fps.num); st->avg_frame_rate = fps; } else if (!strcmp(name, "HEIGHT")) { st->codecpar->height = var_read_int(pb, size); } else if (!strcmp(name, "PIXEL_ASPECT")) { st->sample_aspect_ratio = var_read_float(pb, size); av_reduce(&st->sample_aspect_ratio.num, &st->sample_aspect_ratio.den, st->sample_aspect_ratio.num, st->sample_aspect_ratio.den, INT_MAX); } else if (!strcmp(name, "WIDTH")) { st->codecpar->width = var_read_int(pb, size); } else if (!strcmp(name, "ORIENTATION")) { if (var_read_int(pb, size) == 1101) { st->codecpar->extradata = av_strdup("BottomUp"); st->codecpar->extradata_size = 9; } } else if (!strcmp(name, "Q_SPATIAL") || !strcmp(name, "Q_TEMPORAL")) { var_read_metadata(avctx, name, size); } else if (!strcmp(name, "INTERLACING") || !strcmp(name, "PACKING")) { avio_skip(pb, size); // ignore } else return AVERROR_INVALIDDATA; return 0; } static 
int read_table(AVFormatContext *avctx, AVStream *st, int (*parse)(AVFormatContext *avctx, AVStream *st, const char *name, int size)) { unsigned count; int i; AVIOContext *pb = avctx->pb; avio_skip(pb, 4); count = avio_rb32(pb); avio_skip(pb, 4); for (i = 0; i < count; i++) { char name[17]; int size; if (avio_feof(pb)) return AVERROR_EOF; avio_read(pb, name, 16); name[sizeof(name) - 1] = 0; size = avio_rb32(pb); if (size < 0) { av_log(avctx, AV_LOG_ERROR, "entry size %d is invalid\n", size); return AVERROR_INVALIDDATA; } if (parse(avctx, st, name, size) < 0) { avpriv_request_sample(avctx, "Variable %s", name); avio_skip(pb, size); } } return 0; } static void read_index(AVIOContext *pb, AVStream *st) { uint64_t timestamp = 0; int i; for (i = 0; i < st->nb_frames; i++) { uint32_t pos = avio_rb32(pb); uint32_t size = avio_rb32(pb); avio_skip(pb, 8); av_add_index_entry(st, pos, timestamp, size, 0, AVINDEX_KEYFRAME); if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { timestamp += size / (st->codecpar->channels * 2); } else { timestamp++; } } } static int mv_read_header(AVFormatContext *avctx) { MvContext *mv = avctx->priv_data; AVIOContext *pb = avctx->pb; AVStream *ast = NULL, *vst = NULL; //initialization to suppress warning int version, i; int ret; avio_skip(pb, 4); version = avio_rb16(pb); if (version == 2) { uint64_t timestamp; int v; avio_skip(pb, 22); /* allocate audio track first to prevent unnecessary seeking * (audio packet always precede video packet for a given frame) */ ast = avformat_new_stream(avctx, NULL); if (!ast) return AVERROR(ENOMEM); vst = avformat_new_stream(avctx, NULL); if (!vst) return AVERROR(ENOMEM); avpriv_set_pts_info(vst, 64, 1, 15); vst->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; vst->avg_frame_rate = av_inv_q(vst->time_base); vst->nb_frames = avio_rb32(pb); v = avio_rb32(pb); switch (v) { case 1: vst->codecpar->codec_id = AV_CODEC_ID_MVC1; break; case 2: vst->codecpar->format = AV_PIX_FMT_ARGB; vst->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO; break; default: avpriv_request_sample(avctx, "Video compression %i", v); break; } vst->codecpar->codec_tag = 0; vst->codecpar->width = avio_rb32(pb); vst->codecpar->height = avio_rb32(pb); avio_skip(pb, 12); ast->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; ast->nb_frames = vst->nb_frames; ast->codecpar->sample_rate = avio_rb32(pb); if (ast->codecpar->sample_rate <= 0) { av_log(avctx, AV_LOG_ERROR, "Invalid sample rate %d\n", ast->codecpar->sample_rate); return AVERROR_INVALIDDATA; } avpriv_set_pts_info(ast, 33, 1, ast->codecpar->sample_rate); if (set_channels(avctx, ast, avio_rb32(pb)) < 0) return AVERROR_INVALIDDATA; v = avio_rb32(pb); if (v == AUDIO_FORMAT_SIGNED) { ast->codecpar->codec_id = AV_CODEC_ID_PCM_S16BE; } else { avpriv_request_sample(avctx, "Audio compression (format %i)", v); } avio_skip(pb, 12); var_read_metadata(avctx, "title", 0x80); var_read_metadata(avctx, "comment", 0x100); avio_skip(pb, 0x80); timestamp = 0; for (i = 0; i < vst->nb_frames; i++) { uint32_t pos = avio_rb32(pb); uint32_t asize = avio_rb32(pb); uint32_t vsize = avio_rb32(pb); if (avio_feof(pb)) return AVERROR_INVALIDDATA; avio_skip(pb, 8); av_add_index_entry(ast, pos, timestamp, asize, 0, AVINDEX_KEYFRAME); av_add_index_entry(vst, pos + asize, i, vsize, 0, AVINDEX_KEYFRAME); timestamp += asize / (ast->codecpar->channels * 2); } } else if (!version && avio_rb16(pb) == 3) { avio_skip(pb, 4); if ((ret = read_table(avctx, NULL, parse_global_var)) < 0) return ret; if (mv->nb_audio_tracks > 1) { avpriv_request_sample(avctx, "Multiple audio 
streams support"); return AVERROR_PATCHWELCOME; } else if (mv->nb_audio_tracks) { ast = avformat_new_stream(avctx, NULL); if (!ast) return AVERROR(ENOMEM); ast->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; if ((read_table(avctx, ast, parse_audio_var)) < 0) return ret; if (mv->acompression == 100 && mv->aformat == AUDIO_FORMAT_SIGNED && ast->codecpar->bits_per_coded_sample == 16) { ast->codecpar->codec_id = AV_CODEC_ID_PCM_S16BE; } else { avpriv_request_sample(avctx, "Audio compression %i (format %i, sr %i)", mv->acompression, mv->aformat, ast->codecpar->bits_per_coded_sample); ast->codecpar->codec_id = AV_CODEC_ID_NONE; } if (ast->codecpar->channels <= 0) { av_log(avctx, AV_LOG_ERROR, "No valid channel count found.\n"); return AVERROR_INVALIDDATA; } } if (mv->nb_video_tracks > 1) { avpriv_request_sample(avctx, "Multiple video streams support"); return AVERROR_PATCHWELCOME; } else if (mv->nb_video_tracks) { vst = avformat_new_stream(avctx, NULL); if (!vst) return AVERROR(ENOMEM); vst->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; if ((ret = read_table(avctx, vst, parse_video_var))<0) return ret; } if (mv->nb_audio_tracks) read_index(pb, ast); if (mv->nb_video_tracks) read_index(pb, vst); } else { avpriv_request_sample(avctx, "Version %i", version); return AVERROR_PATCHWELCOME; } return 0; } static int mv_read_packet(AVFormatContext *avctx, AVPacket *pkt) { MvContext *mv = avctx->priv_data; AVIOContext *pb = avctx->pb; AVStream *st = avctx->streams[mv->stream_index]; const AVIndexEntry *index; int frame = mv->frame[mv->stream_index]; int64_t ret; uint64_t pos; if (frame < st->nb_index_entries) { index = &st->index_entries[frame]; pos = avio_tell(pb); if (index->pos > pos) avio_skip(pb, index->pos - pos); else if (index->pos < pos) { if (!(pb->seekable & AVIO_SEEKABLE_NORMAL)) return AVERROR(EIO); ret = avio_seek(pb, index->pos, SEEK_SET); if (ret < 0) return ret; } ret = av_get_packet(pb, pkt, index->size); if (ret < 0) return ret; pkt->stream_index = mv->stream_index; pkt->pts = index->timestamp; pkt->flags |= AV_PKT_FLAG_KEY; mv->frame[mv->stream_index]++; mv->eof_count = 0; } else { mv->eof_count++; if (mv->eof_count >= avctx->nb_streams) return AVERROR_EOF; // avoid returning 0 without a packet return AVERROR(EAGAIN); } mv->stream_index++; if (mv->stream_index >= avctx->nb_streams) mv->stream_index = 0; return 0; } static int mv_read_seek(AVFormatContext *avctx, int stream_index, int64_t timestamp, int flags) { MvContext *mv = avctx->priv_data; AVStream *st = avctx->streams[stream_index]; int frame, i; if ((flags & AVSEEK_FLAG_FRAME) || (flags & AVSEEK_FLAG_BYTE)) return AVERROR(ENOSYS); if (!(avctx->pb->seekable & AVIO_SEEKABLE_NORMAL)) return AVERROR(EIO); frame = av_index_search_timestamp(st, timestamp, flags); if (frame < 0) return AVERROR_INVALIDDATA; for (i = 0; i < avctx->nb_streams; i++) mv->frame[i] = frame; return 0; } AVInputFormat ff_mv_demuxer = { .name = "mv", .long_name = NULL_IF_CONFIG_SMALL("Silicon Graphics Movie"), .priv_data_size = sizeof(MvContext), .read_probe = mv_probe, .read_header = mv_read_header, .read_packet = mv_read_packet, .read_seek = mv_read_seek, };
null
null
null
null
72,031
22,076
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
22,076
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/public/common/input_event_ack_state.h"

#include "base/logging.h"

namespace content {

const char* InputEventAckStateToString(InputEventAckState ack_state) {
  switch (ack_state) {
    case INPUT_EVENT_ACK_STATE_UNKNOWN:
      return "UNKNOWN";
    case INPUT_EVENT_ACK_STATE_CONSUMED:
      return "CONSUMED";
    case INPUT_EVENT_ACK_STATE_NOT_CONSUMED:
      return "NOT_CONSUMED";
    case INPUT_EVENT_ACK_STATE_CONSUMED_SHOULD_BUBBLE:
      return "CONSUMED_SHOULD_BUBBLE";
    case INPUT_EVENT_ACK_STATE_NO_CONSUMER_EXISTS:
      return "NO_CONSUMER_EXISTS";
    case INPUT_EVENT_ACK_STATE_IGNORED:
      return "IGNORED";
    case INPUT_EVENT_ACK_STATE_SET_NON_BLOCKING:
      return "SET_NON_BLOCKING";
    case INPUT_EVENT_ACK_STATE_SET_NON_BLOCKING_DUE_TO_FLING:
      return "SET_NON_BLOCKING_DUE_TO_FLING";
  }
  DLOG(WARNING) << "InputEventAckStateToString: Unhandled InputEventAckState.";
  return "";
}

}  // namespace content
null
null
null
null
18,939
69,158
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
69,158
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef LIBRARIES_NACL_IO_TEST_FAKE_UTIL_H_
#define LIBRARIES_NACL_IO_TEST_FAKE_UTIL_H_

#include <string>

#include <ppapi/c/pp_completion_callback.h>

#include "fake_ppapi/fake_filesystem.h"
#include "fake_ppapi/fake_resource_manager.h"

const int32_t STATUSCODE_NOT_IMPLEMENTED = 501;

class FakeFileRefResource : public FakeResource {
 public:
  FakeFileRefResource() : filesystem(NULL) {}
  static const char* classname() { return "FakeFileRefResource"; }

  FakeFilesystem* filesystem;  // Weak reference.
  FakeFilesystem::Path path;
  std::string contents;
};

class FakeFileSystemResource : public FakeResource {
 public:
  FakeFileSystemResource() : filesystem(NULL), opened(false) {}
  ~FakeFileSystemResource() { delete filesystem; }
  static const char* classname() { return "FakeFileSystemResource"; }

  FakeFilesystem* filesystem;  // Owned.
  bool opened;
};

class FakeHtml5FsResource : public FakeResource {
 public:
  FakeHtml5FsResource() : filesystem_template(NULL) {}
  static const char* classname() { return "FakeHtml5FsResource"; }

  FakeFilesystem* filesystem_template;  // Weak reference.
};

class FakeURLRequestInfoResource : public FakeResource {
 public:
  FakeURLRequestInfoResource() : stream_to_file(false) {}
  static const char* classname() { return "FakeURLRequestInfoResource"; }

  std::string url;
  std::string method;
  std::string headers;
  std::string body;
  bool stream_to_file;
};

class FakeURLResponseInfoResource : public FakeResource {
 public:
  FakeURLResponseInfoResource() : status_code(0) {}
  static const char* classname() { return "FakeURLResponseInfoResource"; }

  int status_code;
  std::string url;
  std::string headers;
};

int32_t RunCompletionCallback(PP_CompletionCallback* callback, int32_t result);

bool GetHeaderValue(const std::string& headers,
                    const std::string& key,
                    std::string* out_value);

void SetHeader(const std::string& key,
               const std::string& value,
               std::string* out_headers);

#endif  // LIBRARIES_NACL_IO_TEST_FAKE_UTIL_H_
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 66021

file_name: 5327
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
unique_id: 5327
project: Chrome
target: 0
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef IOS_CHROME_BROWSER_AUTOFILL_FORM_SUGGESTION_LABEL_H_ #define IOS_CHROME_BROWSER_AUTOFILL_FORM_SUGGESTION_LABEL_H_ #import <UIKit/UIKit.h> @class FormSuggestion; @protocol FormSuggestionViewClient; // Class for Autofill suggestion in the customized keyboard. @interface FormSuggestionLabel : UIView // Designated initializer. Initializes with |proposedFrame| and |client| for // |suggestion|. Its width will be adjusted according to the length of // |suggestion| and width in |proposedFrame| is ignored. - (id)initWithSuggestion:(FormSuggestion*)suggestion proposedFrame:(CGRect)proposedFrame index:(NSUInteger)index numSuggestions:(NSUInteger)numSuggestions client:(id<FormSuggestionViewClient>)client; @end #endif // IOS_CHROME_BROWSER_AUTOFILL_FORM_SUGGESTION_LABEL_H_
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 2190

file_name: 39395
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
unique_id: 39395
project: Chrome
target: 0
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "third_party/blink/renderer/modules/device_orientation/device_motion_controller.h" #include "third_party/blink/public/platform/platform.h" #include "third_party/blink/renderer/core/frame/deprecation.h" #include "third_party/blink/renderer/core/frame/hosts_using_features.h" #include "third_party/blink/renderer/core/frame/settings.h" #include "third_party/blink/renderer/modules/device_orientation/device_motion_data.h" #include "third_party/blink/renderer/modules/device_orientation/device_motion_dispatcher.h" #include "third_party/blink/renderer/modules/device_orientation/device_motion_event.h" #include "third_party/blink/renderer/modules/device_orientation/device_orientation_controller.h" #include "third_party/blink/renderer/modules/event_modules.h" #include "third_party/blink/renderer/platform/weborigin/security_origin.h" namespace blink { DeviceMotionController::DeviceMotionController(Document& document) : DeviceSingleWindowEventController(document), Supplement<Document>(document) {} DeviceMotionController::~DeviceMotionController() = default; const char DeviceMotionController::kSupplementName[] = "DeviceMotionController"; DeviceMotionController& DeviceMotionController::From(Document& document) { DeviceMotionController* controller = Supplement<Document>::From<DeviceMotionController>(document); if (!controller) { controller = new DeviceMotionController(document); ProvideTo(document, controller); } return *controller; } void DeviceMotionController::DidAddEventListener( LocalDOMWindow* window, const AtomicString& event_type) { if (event_type != EventTypeName()) return; LocalFrame* frame = GetDocument().GetFrame(); if (frame) { if (GetDocument().IsSecureContext()) { UseCounter::Count(frame, WebFeature::kDeviceMotionSecureOrigin); } else { Deprecation::CountDeprecation(frame, WebFeature::kDeviceMotionInsecureOrigin); HostsUsingFeatures::CountAnyWorld( GetDocument(), HostsUsingFeatures::Feature::kDeviceMotionInsecureHost); if (frame->GetSettings()->GetStrictPowerfulFeatureRestrictions()) return; } } if (!has_event_listener_) { Platform::Current()->RecordRapporURL("DeviceSensors.DeviceMotion", WebURL(GetDocument().Url())); if (!IsSameSecurityOriginAsMainFrame()) { Platform::Current()->RecordRapporURL( "DeviceSensors.DeviceMotionCrossOrigin", WebURL(GetDocument().Url())); } if (!CheckPolicyFeatures({mojom::FeaturePolicyFeature::kAccelerometer, mojom::FeaturePolicyFeature::kGyroscope})) { DeviceOrientationController::LogToConsolePolicyFeaturesDisabled( frame, EventTypeName()); return; } } DeviceSingleWindowEventController::DidAddEventListener(window, event_type); } bool DeviceMotionController::HasLastData() { return DeviceMotionDispatcher::Instance().LatestDeviceMotionData(); } void DeviceMotionController::RegisterWithDispatcher() { DeviceMotionDispatcher::Instance().AddController(this); } void DeviceMotionController::UnregisterWithDispatcher() { DeviceMotionDispatcher::Instance().RemoveController(this); } Event* DeviceMotionController::LastEvent() const { return DeviceMotionEvent::Create( EventTypeNames::devicemotion, DeviceMotionDispatcher::Instance().LatestDeviceMotionData()); } bool DeviceMotionController::IsNullEvent(Event* event) const { DeviceMotionEvent* motion_event = ToDeviceMotionEvent(event); return !motion_event->GetDeviceMotionData()->CanProvideEventData(); } const AtomicString& 
DeviceMotionController::EventTypeName() const { return EventTypeNames::devicemotion; } void DeviceMotionController::Trace(blink::Visitor* visitor) { DeviceSingleWindowEventController::Trace(visitor); Supplement<Document>::Trace(visitor); } } // namespace blink
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 36258

file_name: 11514
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
unique_id: 11514
project: Chrome
target: 0
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef GPU_IPC_SERVICE_IMAGE_TRANSPORT_SURFACE_H_ #define GPU_IPC_SERVICE_IMAGE_TRANSPORT_SURFACE_H_ #include <stdint.h> #include "base/compiler_specific.h" #include "base/macros.h" #include "base/memory/weak_ptr.h" #include "build/build_config.h" #include "gpu/ipc/common/surface_handle.h" #include "gpu/ipc/service/gpu_ipc_service_export.h" #include "ui/gl/gl_surface.h" namespace gpu { class ImageTransportSurfaceDelegate; // The GPU process is agnostic as to how it displays results. On some platforms // it renders directly to window. On others it renders offscreen and transports // the results to the browser process to display. This file provides a simple // framework for making the offscreen path seem more like the onscreen path. class GPU_IPC_SERVICE_EXPORT ImageTransportSurface { public: #if defined(OS_MACOSX) static void SetAllowOSMesaForTesting(bool allow); #endif // Creates the appropriate native surface depending on the GL implementation. // This will be implemented separately by each platform. On failure, a null // scoped_refptr should be returned. static scoped_refptr<gl::GLSurface> CreateNativeSurface( base::WeakPtr<ImageTransportSurfaceDelegate> stub, SurfaceHandle surface_handle, gl::GLSurfaceFormat format); private: DISALLOW_COPY_AND_ASSIGN(ImageTransportSurface); }; } // namespace gpu #endif // GPU_IPC_SERVICE_IMAGE_TRANSPORT_SURFACE_H_
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 8377

file_name: 29468
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
unique_id: 194463
project: linux
target: 0
repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
/* * Regulator driver for DA9063 PMIC series * * Copyright 2012 Dialog Semiconductors Ltd. * Copyright 2013 Philipp Zabel, Pengutronix * * Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/regulator/of_regulator.h> #include <linux/mfd/da9063/core.h> #include <linux/mfd/da9063/pdata.h> #include <linux/mfd/da9063/registers.h> /* Definition for registering regmap bit fields using a mask */ #define BFIELD(_reg, _mask) \ REG_FIELD(_reg, __builtin_ffs((int)_mask) - 1, \ sizeof(unsigned int) * 8 - __builtin_clz((_mask)) - 1) /* Regulator capabilities and registers description */ struct da9063_regulator_info { struct regulator_desc desc; /* Current limiting */ unsigned n_current_limits; const int *current_limits; /* DA9063 main register fields */ struct reg_field mode; /* buck mode of operation */ struct reg_field suspend; struct reg_field sleep; struct reg_field suspend_sleep; unsigned int suspend_vsel_reg; struct reg_field ilimit; /* DA9063 event detection bit */ struct reg_field oc_event; }; /* Macros for LDO */ #define DA9063_LDO(chip, regl_name, min_mV, step_mV, max_mV) \ .desc.id = chip##_ID_##regl_name, \ .desc.name = __stringify(chip##_##regl_name), \ .desc.ops = &da9063_ldo_ops, \ .desc.min_uV = (min_mV) * 1000, \ .desc.uV_step = (step_mV) * 1000, \ .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1 \ + (DA9063_V##regl_name##_BIAS)), \ .desc.enable_reg = DA9063_REG_##regl_name##_CONT, \ .desc.enable_mask = DA9063_LDO_EN, \ .desc.vsel_reg = DA9063_REG_V##regl_name##_A, \ .desc.vsel_mask = DA9063_V##regl_name##_MASK, \ .desc.linear_min_sel = DA9063_V##regl_name##_BIAS, \ .sleep = BFIELD(DA9063_REG_V##regl_name##_A, DA9063_LDO_SL), \ .suspend_sleep = BFIELD(DA9063_REG_V##regl_name##_B, DA9063_LDO_SL), \ .suspend_vsel_reg = DA9063_REG_V##regl_name##_B /* Macros for voltage DC/DC converters (BUCKs) */ #define DA9063_BUCK(chip, regl_name, min_mV, step_mV, max_mV, limits_array) \ .desc.id = chip##_ID_##regl_name, \ .desc.name = __stringify(chip##_##regl_name), \ .desc.ops = &da9063_buck_ops, \ .desc.min_uV = (min_mV) * 1000, \ .desc.uV_step = (step_mV) * 1000, \ .desc.n_voltages = ((max_mV) - (min_mV))/(step_mV) + 1, \ .current_limits = limits_array, \ .n_current_limits = ARRAY_SIZE(limits_array) #define DA9063_BUCK_COMMON_FIELDS(regl_name) \ .desc.enable_reg = DA9063_REG_##regl_name##_CONT, \ .desc.enable_mask = DA9063_BUCK_EN, \ .desc.vsel_reg = DA9063_REG_V##regl_name##_A, \ .desc.vsel_mask = DA9063_VBUCK_MASK, \ .desc.linear_min_sel = DA9063_VBUCK_BIAS, \ .sleep = BFIELD(DA9063_REG_V##regl_name##_A, DA9063_BUCK_SL), \ .suspend_sleep = BFIELD(DA9063_REG_V##regl_name##_B, DA9063_BUCK_SL), \ .suspend_vsel_reg = DA9063_REG_V##regl_name##_B, \ .mode = BFIELD(DA9063_REG_##regl_name##_CFG, DA9063_BUCK_MODE_MASK) /* Defines asignment of regulators info table to chip model */ struct da9063_dev_model { const struct da9063_regulator_info *regulator_info; unsigned n_regulators; unsigned dev_model; }; /* Single regulator settings */ struct 
da9063_regulator { struct regulator_desc desc; struct regulator_dev *rdev; struct da9063 *hw; const struct da9063_regulator_info *info; struct regmap_field *mode; struct regmap_field *suspend; struct regmap_field *sleep; struct regmap_field *suspend_sleep; struct regmap_field *ilimit; }; /* Encapsulates all information for the regulators driver */ struct da9063_regulators { unsigned n_regulators; /* Array size to be defined during init. Keep at end. */ struct da9063_regulator regulator[0]; }; /* BUCK modes for DA9063 */ enum { BUCK_MODE_MANUAL, /* 0 */ BUCK_MODE_SLEEP, /* 1 */ BUCK_MODE_SYNC, /* 2 */ BUCK_MODE_AUTO /* 3 */ }; /* Regulator operations */ /* Current limits array (in uA) for BCORE1, BCORE2, BPRO. Entry indexes corresponds to register values. */ static const int da9063_buck_a_limits[] = { 500000, 600000, 700000, 800000, 900000, 1000000, 1100000, 1200000, 1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1900000, 2000000 }; /* Current limits array (in uA) for BMEM, BIO, BPERI. Entry indexes corresponds to register values. */ static const int da9063_buck_b_limits[] = { 1500000, 1600000, 1700000, 1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000, 2600000, 2700000, 2800000, 2900000, 3000000 }; /* Current limits array (in uA) for merged BCORE1 and BCORE2. Entry indexes corresponds to register values. */ static const int da9063_bcores_merged_limits[] = { 1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2200000, 2400000, 2600000, 2800000, 3000000, 3200000, 3400000, 3600000, 3800000, 4000000 }; /* Current limits array (in uA) for merged BMEM and BIO. Entry indexes corresponds to register values. */ static const int da9063_bmem_bio_merged_limits[] = { 3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000, 4600000, 4800000, 5000000, 5200000, 5400000, 5600000, 5800000, 6000000 }; static int da9063_set_current_limit(struct regulator_dev *rdev, int min_uA, int max_uA) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); const struct da9063_regulator_info *rinfo = regl->info; int n, tval; for (n = 0; n < rinfo->n_current_limits; n++) { tval = rinfo->current_limits[n]; if (tval >= min_uA && tval <= max_uA) return regmap_field_write(regl->ilimit, n); } return -EINVAL; } static int da9063_get_current_limit(struct regulator_dev *rdev) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); const struct da9063_regulator_info *rinfo = regl->info; unsigned int sel; int ret; ret = regmap_field_read(regl->ilimit, &sel); if (ret < 0) return ret; if (sel >= rinfo->n_current_limits) sel = rinfo->n_current_limits - 1; return rinfo->current_limits[sel]; } static int da9063_buck_set_mode(struct regulator_dev *rdev, unsigned mode) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); unsigned val; switch (mode) { case REGULATOR_MODE_FAST: val = BUCK_MODE_SYNC; break; case REGULATOR_MODE_NORMAL: val = BUCK_MODE_AUTO; break; case REGULATOR_MODE_STANDBY: val = BUCK_MODE_SLEEP; break; default: return -EINVAL; } return regmap_field_write(regl->mode, val); } /* * Bucks use single mode register field for normal operation * and suspend state. * There are 3 modes to map to: FAST, NORMAL, and STANDBY. 
*/ static unsigned da9063_buck_get_mode(struct regulator_dev *rdev) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); struct regmap_field *field; unsigned int val, mode = 0; int ret; ret = regmap_field_read(regl->mode, &val); if (ret < 0) return ret; switch (val) { default: case BUCK_MODE_MANUAL: mode = REGULATOR_MODE_FAST | REGULATOR_MODE_STANDBY; /* Sleep flag bit decides the mode */ break; case BUCK_MODE_SLEEP: return REGULATOR_MODE_STANDBY; case BUCK_MODE_SYNC: return REGULATOR_MODE_FAST; case BUCK_MODE_AUTO: return REGULATOR_MODE_NORMAL; } /* Detect current regulator state */ ret = regmap_field_read(regl->suspend, &val); if (ret < 0) return 0; /* Read regulator mode from proper register, depending on state */ if (val) field = regl->suspend_sleep; else field = regl->sleep; ret = regmap_field_read(field, &val); if (ret < 0) return 0; if (val) mode &= REGULATOR_MODE_STANDBY; else mode &= REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST; return mode; } /* * LDOs use sleep flags - one for normal and one for suspend state. * There are 2 modes to map to: NORMAL and STANDBY (sleep) for each state. */ static int da9063_ldo_set_mode(struct regulator_dev *rdev, unsigned mode) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); unsigned val; switch (mode) { case REGULATOR_MODE_NORMAL: val = 0; break; case REGULATOR_MODE_STANDBY: val = 1; break; default: return -EINVAL; } return regmap_field_write(regl->sleep, val); } static unsigned da9063_ldo_get_mode(struct regulator_dev *rdev) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); struct regmap_field *field; int ret, val; /* Detect current regulator state */ ret = regmap_field_read(regl->suspend, &val); if (ret < 0) return 0; /* Read regulator mode from proper register, depending on state */ if (val) field = regl->suspend_sleep; else field = regl->sleep; ret = regmap_field_read(field, &val); if (ret < 0) return 0; if (val) return REGULATOR_MODE_STANDBY; else return REGULATOR_MODE_NORMAL; } static int da9063_buck_get_status(struct regulator_dev *rdev) { int ret = regulator_is_enabled_regmap(rdev); if (ret == 0) { ret = REGULATOR_STATUS_OFF; } else if (ret > 0) { ret = da9063_buck_get_mode(rdev); if (ret > 0) ret = regulator_mode_to_status(ret); else if (ret == 0) ret = -EIO; } return ret; } static int da9063_ldo_get_status(struct regulator_dev *rdev) { int ret = regulator_is_enabled_regmap(rdev); if (ret == 0) { ret = REGULATOR_STATUS_OFF; } else if (ret > 0) { ret = da9063_ldo_get_mode(rdev); if (ret > 0) ret = regulator_mode_to_status(ret); else if (ret == 0) ret = -EIO; } return ret; } static int da9063_set_suspend_voltage(struct regulator_dev *rdev, int uV) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); const struct da9063_regulator_info *rinfo = regl->info; int ret, sel; sel = regulator_map_voltage_linear(rdev, uV, uV); if (sel < 0) return sel; sel <<= ffs(rdev->desc->vsel_mask) - 1; ret = regmap_update_bits(regl->hw->regmap, rinfo->suspend_vsel_reg, rdev->desc->vsel_mask, sel); return ret; } static int da9063_suspend_enable(struct regulator_dev *rdev) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); return regmap_field_write(regl->suspend, 1); } static int da9063_suspend_disable(struct regulator_dev *rdev) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); return regmap_field_write(regl->suspend, 0); } static int da9063_buck_set_suspend_mode(struct regulator_dev *rdev, unsigned mode) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); int val; switch (mode) { case REGULATOR_MODE_FAST: val = 
BUCK_MODE_SYNC; break; case REGULATOR_MODE_NORMAL: val = BUCK_MODE_AUTO; break; case REGULATOR_MODE_STANDBY: val = BUCK_MODE_SLEEP; break; default: return -EINVAL; } return regmap_field_write(regl->mode, val); } static int da9063_ldo_set_suspend_mode(struct regulator_dev *rdev, unsigned mode) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); unsigned val; switch (mode) { case REGULATOR_MODE_NORMAL: val = 0; break; case REGULATOR_MODE_STANDBY: val = 1; break; default: return -EINVAL; } return regmap_field_write(regl->suspend_sleep, val); } static const struct regulator_ops da9063_buck_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear, .set_current_limit = da9063_set_current_limit, .get_current_limit = da9063_get_current_limit, .set_mode = da9063_buck_set_mode, .get_mode = da9063_buck_get_mode, .get_status = da9063_buck_get_status, .set_suspend_voltage = da9063_set_suspend_voltage, .set_suspend_enable = da9063_suspend_enable, .set_suspend_disable = da9063_suspend_disable, .set_suspend_mode = da9063_buck_set_suspend_mode, }; static const struct regulator_ops da9063_ldo_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear, .set_mode = da9063_ldo_set_mode, .get_mode = da9063_ldo_get_mode, .get_status = da9063_ldo_get_status, .set_suspend_voltage = da9063_set_suspend_voltage, .set_suspend_enable = da9063_suspend_enable, .set_suspend_disable = da9063_suspend_disable, .set_suspend_mode = da9063_ldo_set_suspend_mode, }; /* Info of regulators for DA9063 */ static const struct da9063_regulator_info da9063_regulator_info[] = { { DA9063_BUCK(DA9063, BCORE1, 300, 10, 1570, da9063_buck_a_limits), DA9063_BUCK_COMMON_FIELDS(BCORE1), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE1_SEL), .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_C, DA9063_BCORE1_ILIM_MASK), }, { DA9063_BUCK(DA9063, BCORE2, 300, 10, 1570, da9063_buck_a_limits), DA9063_BUCK_COMMON_FIELDS(BCORE2), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE2_SEL), .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_C, DA9063_BCORE2_ILIM_MASK), }, { DA9063_BUCK(DA9063, BPRO, 530, 10, 1800, da9063_buck_a_limits), DA9063_BUCK_COMMON_FIELDS(BPRO), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBPRO_SEL), .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_B, DA9063_BPRO_ILIM_MASK), }, { DA9063_BUCK(DA9063, BMEM, 800, 20, 3340, da9063_buck_b_limits), DA9063_BUCK_COMMON_FIELDS(BMEM), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBMEM_SEL), .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_A, DA9063_BMEM_ILIM_MASK), }, { DA9063_BUCK(DA9063, BIO, 800, 20, 3340, da9063_buck_b_limits), DA9063_BUCK_COMMON_FIELDS(BIO), .suspend = BFIELD(DA9063_REG_DVC_2, DA9063_VBIO_SEL), .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_A, DA9063_BIO_ILIM_MASK), }, { DA9063_BUCK(DA9063, BPERI, 800, 20, 3340, da9063_buck_b_limits), DA9063_BUCK_COMMON_FIELDS(BPERI), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBPERI_SEL), .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_B, DA9063_BPERI_ILIM_MASK), }, { DA9063_BUCK(DA9063, BCORES_MERGED, 300, 10, 1570, da9063_bcores_merged_limits), /* BCORES_MERGED uses the same register fields as BCORE1 */ DA9063_BUCK_COMMON_FIELDS(BCORE1), .suspend = 
BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE1_SEL), .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_C, DA9063_BCORE1_ILIM_MASK), }, { DA9063_BUCK(DA9063, BMEM_BIO_MERGED, 800, 20, 3340, da9063_bmem_bio_merged_limits), /* BMEM_BIO_MERGED uses the same register fields as BMEM */ DA9063_BUCK_COMMON_FIELDS(BMEM), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBMEM_SEL), .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_A, DA9063_BMEM_ILIM_MASK), }, { DA9063_LDO(DA9063, LDO1, 600, 20, 1860), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO1_SEL), }, { DA9063_LDO(DA9063, LDO2, 600, 20, 1860), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO2_SEL), }, { DA9063_LDO(DA9063, LDO3, 900, 20, 3440), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO3_SEL), .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO3_LIM), }, { DA9063_LDO(DA9063, LDO4, 900, 20, 3440), .suspend = BFIELD(DA9063_REG_DVC_2, DA9063_VLDO4_SEL), .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO4_LIM), }, { DA9063_LDO(DA9063, LDO5, 900, 50, 3600), .suspend = BFIELD(DA9063_REG_LDO5_CONT, DA9063_VLDO5_SEL), }, { DA9063_LDO(DA9063, LDO6, 900, 50, 3600), .suspend = BFIELD(DA9063_REG_LDO6_CONT, DA9063_VLDO6_SEL), }, { DA9063_LDO(DA9063, LDO7, 900, 50, 3600), .suspend = BFIELD(DA9063_REG_LDO7_CONT, DA9063_VLDO7_SEL), .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO7_LIM), }, { DA9063_LDO(DA9063, LDO8, 900, 50, 3600), .suspend = BFIELD(DA9063_REG_LDO8_CONT, DA9063_VLDO8_SEL), .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO8_LIM), }, { DA9063_LDO(DA9063, LDO9, 950, 50, 3600), .suspend = BFIELD(DA9063_REG_LDO9_CONT, DA9063_VLDO9_SEL), }, { DA9063_LDO(DA9063, LDO10, 900, 50, 3600), .suspend = BFIELD(DA9063_REG_LDO10_CONT, DA9063_VLDO10_SEL), }, { DA9063_LDO(DA9063, LDO11, 900, 50, 3600), .suspend = BFIELD(DA9063_REG_LDO11_CONT, DA9063_VLDO11_SEL), .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO11_LIM), }, }; /* Link chip model with regulators info table */ static struct da9063_dev_model regulators_models[] = { { .regulator_info = da9063_regulator_info, .n_regulators = ARRAY_SIZE(da9063_regulator_info), .dev_model = PMIC_DA9063, }, { } }; /* Regulator interrupt handlers */ static irqreturn_t da9063_ldo_lim_event(int irq, void *data) { struct da9063_regulators *regulators = data; struct da9063 *hw = regulators->regulator[0].hw; struct da9063_regulator *regl; int bits, i , ret; ret = regmap_read(hw->regmap, DA9063_REG_STATUS_D, &bits); if (ret < 0) return IRQ_NONE; for (i = regulators->n_regulators - 1; i >= 0; i--) { regl = &regulators->regulator[i]; if (regl->info->oc_event.reg != DA9063_REG_STATUS_D) continue; if (BIT(regl->info->oc_event.lsb) & bits) regulator_notifier_call_chain(regl->rdev, REGULATOR_EVENT_OVER_CURRENT, NULL); } return IRQ_HANDLED; } /* * Probing and Initialisation functions */ static const struct regulator_init_data *da9063_get_regulator_initdata( const struct da9063_regulators_pdata *regl_pdata, int id) { int i; for (i = 0; i < regl_pdata->n_regulators; i++) { if (id == regl_pdata->regulator_data[i].id) return regl_pdata->regulator_data[i].initdata; } return NULL; } #ifdef CONFIG_OF static struct of_regulator_match da9063_matches[] = { [DA9063_ID_BCORE1] = { .name = "bcore1" }, [DA9063_ID_BCORE2] = { .name = "bcore2" }, [DA9063_ID_BPRO] = { .name = "bpro", }, [DA9063_ID_BMEM] = { .name = "bmem", }, [DA9063_ID_BIO] = { .name = "bio", }, [DA9063_ID_BPERI] = { .name = "bperi", }, [DA9063_ID_BCORES_MERGED] = { .name = "bcores-merged" }, [DA9063_ID_BMEM_BIO_MERGED] = { .name = "bmem-bio-merged", }, [DA9063_ID_LDO1] = { .name = "ldo1", }, [DA9063_ID_LDO2] = 
{ .name = "ldo2", }, [DA9063_ID_LDO3] = { .name = "ldo3", }, [DA9063_ID_LDO4] = { .name = "ldo4", }, [DA9063_ID_LDO5] = { .name = "ldo5", }, [DA9063_ID_LDO6] = { .name = "ldo6", }, [DA9063_ID_LDO7] = { .name = "ldo7", }, [DA9063_ID_LDO8] = { .name = "ldo8", }, [DA9063_ID_LDO9] = { .name = "ldo9", }, [DA9063_ID_LDO10] = { .name = "ldo10", }, [DA9063_ID_LDO11] = { .name = "ldo11", }, }; static struct da9063_regulators_pdata *da9063_parse_regulators_dt( struct platform_device *pdev, struct of_regulator_match **da9063_reg_matches) { struct da9063_regulators_pdata *pdata; struct da9063_regulator_data *rdata; struct device_node *node; int i, n, num; node = of_get_child_by_name(pdev->dev.parent->of_node, "regulators"); if (!node) { dev_err(&pdev->dev, "Regulators device node not found\n"); return ERR_PTR(-ENODEV); } num = of_regulator_match(&pdev->dev, node, da9063_matches, ARRAY_SIZE(da9063_matches)); of_node_put(node); if (num < 0) { dev_err(&pdev->dev, "Failed to match regulators\n"); return ERR_PTR(-EINVAL); } pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return ERR_PTR(-ENOMEM); pdata->regulator_data = devm_kzalloc(&pdev->dev, num * sizeof(*pdata->regulator_data), GFP_KERNEL); if (!pdata->regulator_data) return ERR_PTR(-ENOMEM); pdata->n_regulators = num; n = 0; for (i = 0; i < ARRAY_SIZE(da9063_matches); i++) { if (!da9063_matches[i].init_data) continue; rdata = &pdata->regulator_data[n]; rdata->id = i; rdata->initdata = da9063_matches[i].init_data; n++; } *da9063_reg_matches = da9063_matches; return pdata; } #else static struct da9063_regulators_pdata *da9063_parse_regulators_dt( struct platform_device *pdev, struct of_regulator_match **da9063_reg_matches) { *da9063_reg_matches = NULL; return ERR_PTR(-ENODEV); } #endif static int da9063_regulator_probe(struct platform_device *pdev) { struct da9063 *da9063 = dev_get_drvdata(pdev->dev.parent); struct da9063_pdata *da9063_pdata = dev_get_platdata(da9063->dev); struct of_regulator_match *da9063_reg_matches = NULL; struct da9063_regulators_pdata *regl_pdata; const struct da9063_dev_model *model; struct da9063_regulators *regulators; struct da9063_regulator *regl; struct regulator_config config; bool bcores_merged, bmem_bio_merged; int id, irq, n, n_regulators, ret, val; size_t size; regl_pdata = da9063_pdata ? 
da9063_pdata->regulators_pdata : NULL; if (!regl_pdata) regl_pdata = da9063_parse_regulators_dt(pdev, &da9063_reg_matches); if (IS_ERR(regl_pdata) || regl_pdata->n_regulators == 0) { dev_err(&pdev->dev, "No regulators defined for the platform\n"); return PTR_ERR(regl_pdata); } /* Find regulators set for particular device model */ for (model = regulators_models; model->regulator_info; model++) { if (model->dev_model == da9063->model) break; } if (!model->regulator_info) { dev_err(&pdev->dev, "Chip model not recognised (%u)\n", da9063->model); return -ENODEV; } ret = regmap_read(da9063->regmap, DA9063_REG_CONFIG_H, &val); if (ret < 0) { dev_err(&pdev->dev, "Error while reading BUCKs configuration\n"); return ret; } bcores_merged = val & DA9063_BCORE_MERGE; bmem_bio_merged = val & DA9063_BUCK_MERGE; n_regulators = model->n_regulators; if (bcores_merged) n_regulators -= 2; /* remove BCORE1, BCORE2 */ else n_regulators--; /* remove BCORES_MERGED */ if (bmem_bio_merged) n_regulators -= 2; /* remove BMEM, BIO */ else n_regulators--; /* remove BMEM_BIO_MERGED */ /* Allocate memory required by usable regulators */ size = sizeof(struct da9063_regulators) + n_regulators * sizeof(struct da9063_regulator); regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); if (!regulators) return -ENOMEM; regulators->n_regulators = n_regulators; platform_set_drvdata(pdev, regulators); /* Register all regulators declared in platform information */ n = 0; id = 0; while (n < regulators->n_regulators) { /* Skip regulator IDs depending on merge mode configuration */ switch (id) { case DA9063_ID_BCORE1: case DA9063_ID_BCORE2: if (bcores_merged) { id++; continue; } break; case DA9063_ID_BMEM: case DA9063_ID_BIO: if (bmem_bio_merged) { id++; continue; } break; case DA9063_ID_BCORES_MERGED: if (!bcores_merged) { id++; continue; } break; case DA9063_ID_BMEM_BIO_MERGED: if (!bmem_bio_merged) { id++; continue; } break; } /* Initialise regulator structure */ regl = &regulators->regulator[n]; regl->hw = da9063; regl->info = &model->regulator_info[id]; regl->desc = regl->info->desc; regl->desc.type = REGULATOR_VOLTAGE; regl->desc.owner = THIS_MODULE; if (regl->info->mode.reg) regl->mode = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->mode); if (regl->info->suspend.reg) regl->suspend = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->suspend); if (regl->info->sleep.reg) regl->sleep = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->sleep); if (regl->info->suspend_sleep.reg) regl->suspend_sleep = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->suspend_sleep); if (regl->info->ilimit.reg) regl->ilimit = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->ilimit); /* Register regulator */ memset(&config, 0, sizeof(config)); config.dev = &pdev->dev; config.init_data = da9063_get_regulator_initdata(regl_pdata, id); config.driver_data = regl; if (da9063_reg_matches) config.of_node = da9063_reg_matches[id].of_node; config.regmap = da9063->regmap; regl->rdev = devm_regulator_register(&pdev->dev, &regl->desc, &config); if (IS_ERR(regl->rdev)) { dev_err(&pdev->dev, "Failed to register %s regulator\n", regl->desc.name); return PTR_ERR(regl->rdev); } id++; n++; } /* LDOs overcurrent event support */ irq = platform_get_irq_byname(pdev, "LDO_LIM"); if (irq < 0) { dev_err(&pdev->dev, "Failed to get IRQ.\n"); return irq; } ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, da9063_ldo_lim_event, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "LDO_LIM", regulators); if (ret) { 
dev_err(&pdev->dev, "Failed to request LDO_LIM IRQ.\n"); return ret; } return 0; } static struct platform_driver da9063_regulator_driver = { .driver = { .name = DA9063_DRVNAME_REGULATORS, }, .probe = da9063_regulator_probe, }; static int __init da9063_regulator_init(void) { return platform_driver_register(&da9063_regulator_driver); } subsys_initcall(da9063_regulator_init); static void __exit da9063_regulator_cleanup(void) { platform_driver_unregister(&da9063_regulator_driver); } module_exit(da9063_regulator_cleanup); /* Module information */ MODULE_AUTHOR("Krystian Garbaciak <krystian.garbaciak@diasemi.com>"); MODULE_DESCRIPTION("DA9063 regulators driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DA9063_DRVNAME_REGULATORS);
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 102810

file_name: 39556
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
unique_id: 39556
project: Chrome
target: 0
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_ENCRYPTEDMEDIA_ENCRYPTED_MEDIA_UTILS_H_ #define THIRD_PARTY_BLINK_RENDERER_MODULES_ENCRYPTEDMEDIA_ENCRYPTED_MEDIA_UTILS_H_ #include "third_party/blink/public/platform/web_encrypted_media_key_information.h" #include "third_party/blink/public/platform/web_encrypted_media_types.h" #include "third_party/blink/renderer/platform/wtf/allocator.h" #include "third_party/blink/renderer/platform/wtf/text/wtf_string.h" namespace blink { class EncryptedMediaUtils { STATIC_ONLY(EncryptedMediaUtils); public: static WebEncryptedMediaInitDataType ConvertToInitDataType( const String& init_data_type); static String ConvertFromInitDataType(WebEncryptedMediaInitDataType); static WebEncryptedMediaSessionType ConvertToSessionType( const String& session_type); static String ConvertFromSessionType(WebEncryptedMediaSessionType); static String ConvertKeyStatusToString( const WebEncryptedMediaKeyInformation::KeyStatus); }; } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_MODULES_ENCRYPTEDMEDIA_ENCRYPTED_MEDIA_UTILS_H_
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 36419

file_name: 21164
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
unique_id: 186159
project: linux
target: 0
repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
/* ZD1211 USB-WLAN driver for Linux * * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de> * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org> * Copyright (C) 2006-2007 Michael Wu <flamingice@sourmilk.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/firmware.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/usb.h> #include <linux/workqueue.h> #include <linux/module.h> #include <net/mac80211.h> #include <asm/unaligned.h> #include "zd_def.h" #include "zd_mac.h" #include "zd_usb.h" static struct usb_device_id usb_ids[] = { /* ZD1211 */ { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0586, 0x3401), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0586, 0x3402), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0586, 0x3407), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0586, 0x3409), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x079b, 0x004a), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0ace, 0x1211), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0ace, 0xa211), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0b05, 0x170c), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0b3b, 0x1630), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0b3b, 0x5630), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0df6, 0x9075), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x13b1, 0x001e), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x1435, 0x0711), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x14ea, 0xab10), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x14ea, 0xab13), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x157e, 0x3204), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x157e, 0x3207), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, /* ZD1211B */ { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0409, 0x0248), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x054c, 0x0257), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0586, 0x340f), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0586, 0x3410), .driver_info = DEVICE_ZD1211B }, { 
USB_DEVICE(0x0586, 0x3412), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0586, 0x3413), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x083a, 0xe501), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x083a, 0xe503), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x083a, 0xe506), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0ace, 0xb215), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0b05, 0x171b), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0baf, 0x0121), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0cde, 0x001a), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0df6, 0x0036), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x129b, 0x1667), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x13b1, 0x0024), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x2019, 0x5303), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x2019, 0xed01), .driver_info = DEVICE_ZD1211B }, /* "Driverless" devices that need ejecting */ { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER }, {} }; MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("USB driver for devices with the ZD1211 chip."); MODULE_AUTHOR("Ulrich Kunitz"); MODULE_AUTHOR("Daniel Drake"); MODULE_VERSION("1.0"); MODULE_DEVICE_TABLE(usb, usb_ids); #define FW_ZD1211_PREFIX "zd1211/zd1211_" #define FW_ZD1211B_PREFIX "zd1211/zd1211b_" static bool check_read_regs(struct zd_usb *usb, struct usb_req_read_regs *req, unsigned int count); /* USB device initialization */ static void int_urb_complete(struct urb *urb); static int request_fw_file( const struct firmware **fw, const char *name, struct device *device) { int r; dev_dbg_f(device, "fw name %s\n", name); r = request_firmware(fw, name, device); if (r) dev_err(device, "Could not load firmware file %s. Error number %d\n", name, r); return r; } static inline u16 get_bcdDevice(const struct usb_device *udev) { return le16_to_cpu(udev->descriptor.bcdDevice); } enum upload_code_flags { REBOOT = 1, }; /* Ensures that MAX_TRANSFER_SIZE is even. */ #define MAX_TRANSFER_SIZE (USB_MAX_TRANSFER_SIZE & ~1) static int upload_code(struct usb_device *udev, const u8 *data, size_t size, u16 code_offset, int flags) { u8 *p; int r; /* USB request blocks need "kmalloced" buffers. */ p = kmalloc(MAX_TRANSFER_SIZE, GFP_KERNEL); if (!p) { r = -ENOMEM; goto error; } size &= ~1; while (size > 0) { size_t transfer_size = size <= MAX_TRANSFER_SIZE ? size : MAX_TRANSFER_SIZE; dev_dbg_f(&udev->dev, "transfer size %zu\n", transfer_size); memcpy(p, data, transfer_size); r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_FIRMWARE_DOWNLOAD, USB_DIR_OUT | USB_TYPE_VENDOR, code_offset, 0, p, transfer_size, 1000 /* ms */); if (r < 0) { dev_err(&udev->dev, "USB control request for firmware upload" " failed. Error number %d\n", r); goto error; } transfer_size = r & ~1; size -= transfer_size; data += transfer_size; code_offset += transfer_size/sizeof(u16); } if (flags & REBOOT) { u8 ret; /* Use "DMA-aware" buffer. 
*/ r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), USB_REQ_FIRMWARE_CONFIRM, USB_DIR_IN | USB_TYPE_VENDOR, 0, 0, p, sizeof(ret), 5000 /* ms */); if (r != sizeof(ret)) { dev_err(&udev->dev, "control request firmware confirmation failed." " Return value %d\n", r); if (r >= 0) r = -ENODEV; goto error; } ret = p[0]; if (ret & 0x80) { dev_err(&udev->dev, "Internal error while downloading." " Firmware confirm return value %#04x\n", (unsigned int)ret); r = -ENODEV; goto error; } dev_dbg_f(&udev->dev, "firmware confirm return value %#04x\n", (unsigned int)ret); } r = 0; error: kfree(p); return r; } static u16 get_word(const void *data, u16 offset) { const __le16 *p = data; return le16_to_cpu(p[offset]); } static char *get_fw_name(struct zd_usb *usb, char *buffer, size_t size, const char* postfix) { scnprintf(buffer, size, "%s%s", usb->is_zd1211b ? FW_ZD1211B_PREFIX : FW_ZD1211_PREFIX, postfix); return buffer; } static int handle_version_mismatch(struct zd_usb *usb, const struct firmware *ub_fw) { struct usb_device *udev = zd_usb_to_usbdev(usb); const struct firmware *ur_fw = NULL; int offset; int r = 0; char fw_name[128]; r = request_fw_file(&ur_fw, get_fw_name(usb, fw_name, sizeof(fw_name), "ur"), &udev->dev); if (r) goto error; r = upload_code(udev, ur_fw->data, ur_fw->size, FW_START, REBOOT); if (r) goto error; offset = (E2P_BOOT_CODE_OFFSET * sizeof(u16)); r = upload_code(udev, ub_fw->data + offset, ub_fw->size - offset, E2P_START + E2P_BOOT_CODE_OFFSET, REBOOT); /* At this point, the vendor driver downloads the whole firmware * image, hacks around with version IDs, and uploads it again, * completely overwriting the boot code. We do not do this here as * it is not required on any tested devices, and it is suspected to * cause problems. */ error: release_firmware(ur_fw); return r; } static int upload_firmware(struct zd_usb *usb) { int r; u16 fw_bcdDevice; u16 bcdDevice; struct usb_device *udev = zd_usb_to_usbdev(usb); const struct firmware *ub_fw = NULL; const struct firmware *uph_fw = NULL; char fw_name[128]; bcdDevice = get_bcdDevice(udev); r = request_fw_file(&ub_fw, get_fw_name(usb, fw_name, sizeof(fw_name), "ub"), &udev->dev); if (r) goto error; fw_bcdDevice = get_word(ub_fw->data, E2P_DATA_OFFSET); if (fw_bcdDevice != bcdDevice) { dev_info(&udev->dev, "firmware version %#06x and device bootcode version " "%#06x differ\n", fw_bcdDevice, bcdDevice); if (bcdDevice <= 0x4313) dev_warn(&udev->dev, "device has old bootcode, please " "report success or failure\n"); r = handle_version_mismatch(usb, ub_fw); if (r) goto error; } else { dev_dbg_f(&udev->dev, "firmware device id %#06x is equal to the " "actual device id\n", fw_bcdDevice); } r = request_fw_file(&uph_fw, get_fw_name(usb, fw_name, sizeof(fw_name), "uphr"), &udev->dev); if (r) goto error; r = upload_code(udev, uph_fw->data, uph_fw->size, FW_START, REBOOT); if (r) { dev_err(&udev->dev, "Could not upload firmware code uph. Error number %d\n", r); } /* FALL-THROUGH */ error: release_firmware(ub_fw); release_firmware(uph_fw); return r; } MODULE_FIRMWARE(FW_ZD1211B_PREFIX "ur"); MODULE_FIRMWARE(FW_ZD1211_PREFIX "ur"); MODULE_FIRMWARE(FW_ZD1211B_PREFIX "ub"); MODULE_FIRMWARE(FW_ZD1211_PREFIX "ub"); MODULE_FIRMWARE(FW_ZD1211B_PREFIX "uphr"); MODULE_FIRMWARE(FW_ZD1211_PREFIX "uphr"); /* Read data from device address space using "firmware interface" which does * not require firmware to be loaded. 
*/ int zd_usb_read_fw(struct zd_usb *usb, zd_addr_t addr, u8 *data, u16 len) { int r; struct usb_device *udev = zd_usb_to_usbdev(usb); u8 *buf; /* Use "DMA-aware" buffer. */ buf = kmalloc(len, GFP_KERNEL); if (!buf) return -ENOMEM; r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), USB_REQ_FIRMWARE_READ_DATA, USB_DIR_IN | 0x40, addr, 0, buf, len, 5000); if (r < 0) { dev_err(&udev->dev, "read over firmware interface failed: %d\n", r); goto exit; } else if (r != len) { dev_err(&udev->dev, "incomplete read over firmware interface: %d/%d\n", r, len); r = -EIO; goto exit; } r = 0; memcpy(data, buf, len); exit: kfree(buf); return r; } #define urb_dev(urb) (&(urb)->dev->dev) static inline void handle_regs_int_override(struct urb *urb) { struct zd_usb *usb = urb->context; struct zd_usb_interrupt *intr = &usb->intr; spin_lock(&intr->lock); if (atomic_read(&intr->read_regs_enabled)) { atomic_set(&intr->read_regs_enabled, 0); intr->read_regs_int_overridden = 1; complete(&intr->read_regs.completion); } spin_unlock(&intr->lock); } static inline void handle_regs_int(struct urb *urb) { struct zd_usb *usb = urb->context; struct zd_usb_interrupt *intr = &usb->intr; int len; u16 int_num; ZD_ASSERT(in_interrupt()); spin_lock(&intr->lock); int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2)); if (int_num == CR_INTERRUPT) { struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context)); spin_lock(&mac->lock); memcpy(&mac->intr_buffer, urb->transfer_buffer, USB_MAX_EP_INT_BUFFER); spin_unlock(&mac->lock); schedule_work(&mac->process_intr); } else if (atomic_read(&intr->read_regs_enabled)) { len = urb->actual_length; intr->read_regs.length = urb->actual_length; if (len > sizeof(intr->read_regs.buffer)) len = sizeof(intr->read_regs.buffer); memcpy(intr->read_regs.buffer, urb->transfer_buffer, len); /* Sometimes USB_INT_ID_REGS is not overridden, but comes after * USB_INT_ID_RETRY_FAILED. Read-reg retry then gets this * delayed USB_INT_ID_REGS, but leaves USB_INT_ID_REGS of * retry unhandled. Next read-reg command then might catch * this wrong USB_INT_ID_REGS. Fix by ignoring wrong reads. */ if (!check_read_regs(usb, intr->read_regs.req, intr->read_regs.req_count)) goto out; atomic_set(&intr->read_regs_enabled, 0); intr->read_regs_int_overridden = 0; complete(&intr->read_regs.completion); goto out; } out: spin_unlock(&intr->lock); /* CR_INTERRUPT might override read_reg too. */ if (int_num == CR_INTERRUPT && atomic_read(&intr->read_regs_enabled)) handle_regs_int_override(urb); } static void int_urb_complete(struct urb *urb) { int r; struct usb_int_header *hdr; struct zd_usb *usb; struct zd_usb_interrupt *intr; switch (urb->status) { case 0: break; case -ESHUTDOWN: case -EINVAL: case -ENODEV: case -ENOENT: case -ECONNRESET: case -EPIPE: dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); return; default: dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); goto resubmit; } if (urb->actual_length < sizeof(hdr)) { dev_dbg_f(urb_dev(urb), "error: urb %p to small\n", urb); goto resubmit; } hdr = urb->transfer_buffer; if (hdr->type != USB_INT_TYPE) { dev_dbg_f(urb_dev(urb), "error: urb %p wrong type\n", urb); goto resubmit; } /* USB_INT_ID_RETRY_FAILED triggered by tx-urb submit can override * pending USB_INT_ID_REGS causing read command timeout. 
*/ usb = urb->context; intr = &usb->intr; if (hdr->id != USB_INT_ID_REGS && atomic_read(&intr->read_regs_enabled)) handle_regs_int_override(urb); switch (hdr->id) { case USB_INT_ID_REGS: handle_regs_int(urb); break; case USB_INT_ID_RETRY_FAILED: zd_mac_tx_failed(urb); break; default: dev_dbg_f(urb_dev(urb), "error: urb %p unknown id %x\n", urb, (unsigned int)hdr->id); goto resubmit; } resubmit: r = usb_submit_urb(urb, GFP_ATOMIC); if (r) { dev_dbg_f(urb_dev(urb), "error: resubmit urb %p err code %d\n", urb, r); /* TODO: add worker to reset intr->urb */ } return; } static inline int int_urb_interval(struct usb_device *udev) { switch (udev->speed) { case USB_SPEED_HIGH: return 4; case USB_SPEED_LOW: return 10; case USB_SPEED_FULL: default: return 1; } } static inline int usb_int_enabled(struct zd_usb *usb) { unsigned long flags; struct zd_usb_interrupt *intr = &usb->intr; struct urb *urb; spin_lock_irqsave(&intr->lock, flags); urb = intr->urb; spin_unlock_irqrestore(&intr->lock, flags); return urb != NULL; } int zd_usb_enable_int(struct zd_usb *usb) { int r; struct usb_device *udev = zd_usb_to_usbdev(usb); struct zd_usb_interrupt *intr = &usb->intr; struct urb *urb; dev_dbg_f(zd_usb_dev(usb), "\n"); urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { r = -ENOMEM; goto out; } ZD_ASSERT(!irqs_disabled()); spin_lock_irq(&intr->lock); if (intr->urb) { spin_unlock_irq(&intr->lock); r = 0; goto error_free_urb; } intr->urb = urb; spin_unlock_irq(&intr->lock); r = -ENOMEM; intr->buffer = usb_alloc_coherent(udev, USB_MAX_EP_INT_BUFFER, GFP_KERNEL, &intr->buffer_dma); if (!intr->buffer) { dev_dbg_f(zd_usb_dev(usb), "couldn't allocate transfer_buffer\n"); goto error_set_urb_null; } usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN), intr->buffer, USB_MAX_EP_INT_BUFFER, int_urb_complete, usb, intr->interval); urb->transfer_dma = intr->buffer_dma; urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb); r = usb_submit_urb(urb, GFP_KERNEL); if (r) { dev_dbg_f(zd_usb_dev(usb), "Couldn't submit urb. Error number %d\n", r); goto error; } return 0; error: usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER, intr->buffer, intr->buffer_dma); error_set_urb_null: spin_lock_irq(&intr->lock); intr->urb = NULL; spin_unlock_irq(&intr->lock); error_free_urb: usb_free_urb(urb); out: return r; } void zd_usb_disable_int(struct zd_usb *usb) { unsigned long flags; struct usb_device *udev = zd_usb_to_usbdev(usb); struct zd_usb_interrupt *intr = &usb->intr; struct urb *urb; void *buffer; dma_addr_t buffer_dma; spin_lock_irqsave(&intr->lock, flags); urb = intr->urb; if (!urb) { spin_unlock_irqrestore(&intr->lock, flags); return; } intr->urb = NULL; buffer = intr->buffer; buffer_dma = intr->buffer_dma; intr->buffer = NULL; spin_unlock_irqrestore(&intr->lock, flags); usb_kill_urb(urb); dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb); usb_free_urb(urb); if (buffer) usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER, buffer, buffer_dma); } static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer, unsigned int length) { int i; const struct rx_length_info *length_info; if (length < sizeof(struct rx_length_info)) { /* It's not a complete packet anyhow. */ dev_dbg_f(zd_usb_dev(usb), "invalid, small RX packet : %d\n", length); return; } length_info = (struct rx_length_info *) (buffer + length - sizeof(struct rx_length_info)); /* It might be that three frames are merged into a single URB * transaction. We have to check for the length info tag. 
* * While testing we discovered that length_info might be unaligned, * because if USB transactions are merged, the last packet will not * be padded. Unaligned access might also happen if the length_info * structure is not present. */ if (get_unaligned_le16(&length_info->tag) == RX_LENGTH_INFO_TAG) { unsigned int l, k, n; for (i = 0, l = 0;; i++) { k = get_unaligned_le16(&length_info->length[i]); if (k == 0) return; n = l+k; if (n > length) return; zd_mac_rx(zd_usb_to_hw(usb), buffer+l, k); if (i >= 2) return; l = (n+3) & ~3; } } else { zd_mac_rx(zd_usb_to_hw(usb), buffer, length); } } static void rx_urb_complete(struct urb *urb) { int r; struct zd_usb *usb; struct zd_usb_rx *rx; const u8 *buffer; unsigned int length; switch (urb->status) { case 0: break; case -ESHUTDOWN: case -EINVAL: case -ENODEV: case -ENOENT: case -ECONNRESET: case -EPIPE: dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); return; default: dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); goto resubmit; } buffer = urb->transfer_buffer; length = urb->actual_length; usb = urb->context; rx = &usb->rx; tasklet_schedule(&rx->reset_timer_tasklet); if (length%rx->usb_packet_size > rx->usb_packet_size-4) { /* If there is an old first fragment, we don't care. */ dev_dbg_f(urb_dev(urb), "*** first fragment ***\n"); ZD_ASSERT(length <= ARRAY_SIZE(rx->fragment)); spin_lock(&rx->lock); memcpy(rx->fragment, buffer, length); rx->fragment_length = length; spin_unlock(&rx->lock); goto resubmit; } spin_lock(&rx->lock); if (rx->fragment_length > 0) { /* We are on a second fragment, we believe */ ZD_ASSERT(length + rx->fragment_length <= ARRAY_SIZE(rx->fragment)); dev_dbg_f(urb_dev(urb), "*** second fragment ***\n"); memcpy(rx->fragment+rx->fragment_length, buffer, length); handle_rx_packet(usb, rx->fragment, rx->fragment_length + length); rx->fragment_length = 0; spin_unlock(&rx->lock); } else { spin_unlock(&rx->lock); handle_rx_packet(usb, buffer, length); } resubmit: r = usb_submit_urb(urb, GFP_ATOMIC); if (r) dev_dbg_f(urb_dev(urb), "urb %p resubmit error %d\n", urb, r); } static struct urb *alloc_rx_urb(struct zd_usb *usb) { struct usb_device *udev = zd_usb_to_usbdev(usb); struct urb *urb; void *buffer; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return NULL; buffer = usb_alloc_coherent(udev, USB_MAX_RX_SIZE, GFP_KERNEL, &urb->transfer_dma); if (!buffer) { usb_free_urb(urb); return NULL; } usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, EP_DATA_IN), buffer, USB_MAX_RX_SIZE, rx_urb_complete, usb); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; return urb; } static void free_rx_urb(struct urb *urb) { if (!urb) return; usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); usb_free_urb(urb); } static int __zd_usb_enable_rx(struct zd_usb *usb) { int i, r; struct zd_usb_rx *rx = &usb->rx; struct urb **urbs; dev_dbg_f(zd_usb_dev(usb), "\n"); r = -ENOMEM; urbs = kcalloc(RX_URBS_COUNT, sizeof(struct urb *), GFP_KERNEL); if (!urbs) goto error; for (i = 0; i < RX_URBS_COUNT; i++) { urbs[i] = alloc_rx_urb(usb); if (!urbs[i]) goto error; } ZD_ASSERT(!irqs_disabled()); spin_lock_irq(&rx->lock); if (rx->urbs) { spin_unlock_irq(&rx->lock); r = 0; goto error; } rx->urbs = urbs; rx->urbs_count = RX_URBS_COUNT; spin_unlock_irq(&rx->lock); for (i = 0; i < RX_URBS_COUNT; i++) { r = usb_submit_urb(urbs[i], GFP_KERNEL); if (r) goto error_submit; } return 0; error_submit: for (i = 0; i < RX_URBS_COUNT; i++) { usb_kill_urb(urbs[i]); } spin_lock_irq(&rx->lock); rx->urbs = NULL; 
rx->urbs_count = 0; spin_unlock_irq(&rx->lock); error: if (urbs) { for (i = 0; i < RX_URBS_COUNT; i++) free_rx_urb(urbs[i]); } return r; } int zd_usb_enable_rx(struct zd_usb *usb) { int r; struct zd_usb_rx *rx = &usb->rx; mutex_lock(&rx->setup_mutex); r = __zd_usb_enable_rx(usb); mutex_unlock(&rx->setup_mutex); zd_usb_reset_rx_idle_timer(usb); return r; } static void __zd_usb_disable_rx(struct zd_usb *usb) { int i; unsigned long flags; struct urb **urbs; unsigned int count; struct zd_usb_rx *rx = &usb->rx; spin_lock_irqsave(&rx->lock, flags); urbs = rx->urbs; count = rx->urbs_count; spin_unlock_irqrestore(&rx->lock, flags); if (!urbs) return; for (i = 0; i < count; i++) { usb_kill_urb(urbs[i]); free_rx_urb(urbs[i]); } kfree(urbs); spin_lock_irqsave(&rx->lock, flags); rx->urbs = NULL; rx->urbs_count = 0; spin_unlock_irqrestore(&rx->lock, flags); } void zd_usb_disable_rx(struct zd_usb *usb) { struct zd_usb_rx *rx = &usb->rx; mutex_lock(&rx->setup_mutex); __zd_usb_disable_rx(usb); mutex_unlock(&rx->setup_mutex); tasklet_kill(&rx->reset_timer_tasklet); cancel_delayed_work_sync(&rx->idle_work); } static void zd_usb_reset_rx(struct zd_usb *usb) { bool do_reset; struct zd_usb_rx *rx = &usb->rx; unsigned long flags; mutex_lock(&rx->setup_mutex); spin_lock_irqsave(&rx->lock, flags); do_reset = rx->urbs != NULL; spin_unlock_irqrestore(&rx->lock, flags); if (do_reset) { __zd_usb_disable_rx(usb); __zd_usb_enable_rx(usb); } mutex_unlock(&rx->setup_mutex); if (do_reset) zd_usb_reset_rx_idle_timer(usb); } /** * zd_usb_disable_tx - disable transmission * @usb: the zd1211rw-private USB structure * * Frees all URBs in the free list and marks the transmission as disabled. */ void zd_usb_disable_tx(struct zd_usb *usb) { struct zd_usb_tx *tx = &usb->tx; unsigned long flags; atomic_set(&tx->enabled, 0); /* kill all submitted tx-urbs */ usb_kill_anchored_urbs(&tx->submitted); spin_lock_irqsave(&tx->lock, flags); WARN_ON(!skb_queue_empty(&tx->submitted_skbs)); WARN_ON(tx->submitted_urbs != 0); tx->submitted_urbs = 0; spin_unlock_irqrestore(&tx->lock, flags); /* The stopped state is ignored, relying on ieee80211_wake_queues() * in a potentionally following zd_usb_enable_tx(). */ } /** * zd_usb_enable_tx - enables transmission * @usb: a &struct zd_usb pointer * * This function enables transmission and prepares the &zd_usb_tx data * structure. */ void zd_usb_enable_tx(struct zd_usb *usb) { unsigned long flags; struct zd_usb_tx *tx = &usb->tx; spin_lock_irqsave(&tx->lock, flags); atomic_set(&tx->enabled, 1); tx->submitted_urbs = 0; ieee80211_wake_queues(zd_usb_to_hw(usb)); tx->stopped = 0; spin_unlock_irqrestore(&tx->lock, flags); } static void tx_dec_submitted_urbs(struct zd_usb *usb) { struct zd_usb_tx *tx = &usb->tx; unsigned long flags; spin_lock_irqsave(&tx->lock, flags); --tx->submitted_urbs; if (tx->stopped && tx->submitted_urbs <= ZD_USB_TX_LOW) { ieee80211_wake_queues(zd_usb_to_hw(usb)); tx->stopped = 0; } spin_unlock_irqrestore(&tx->lock, flags); } static void tx_inc_submitted_urbs(struct zd_usb *usb) { struct zd_usb_tx *tx = &usb->tx; unsigned long flags; spin_lock_irqsave(&tx->lock, flags); ++tx->submitted_urbs; if (!tx->stopped && tx->submitted_urbs > ZD_USB_TX_HIGH) { ieee80211_stop_queues(zd_usb_to_hw(usb)); tx->stopped = 1; } spin_unlock_irqrestore(&tx->lock, flags); } /** * tx_urb_complete - completes the execution of an URB * @urb: a URB * * This function is called if the URB has been transferred to a device or an * error has happened. 
*/ static void tx_urb_complete(struct urb *urb) { int r; struct sk_buff *skb; struct ieee80211_tx_info *info; struct zd_usb *usb; struct zd_usb_tx *tx; skb = (struct sk_buff *)urb->context; info = IEEE80211_SKB_CB(skb); /* * grab 'usb' pointer before handing off the skb (since * it might be freed by zd_mac_tx_to_dev or mac80211) */ usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb; tx = &usb->tx; switch (urb->status) { case 0: break; case -ESHUTDOWN: case -EINVAL: case -ENODEV: case -ENOENT: case -ECONNRESET: case -EPIPE: dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); break; default: dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); goto resubmit; } free_urb: skb_unlink(skb, &usb->tx.submitted_skbs); zd_mac_tx_to_dev(skb, urb->status); usb_free_urb(urb); tx_dec_submitted_urbs(usb); return; resubmit: usb_anchor_urb(urb, &tx->submitted); r = usb_submit_urb(urb, GFP_ATOMIC); if (r) { usb_unanchor_urb(urb); dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r); goto free_urb; } } /** * zd_usb_tx: initiates transfer of a frame of the device * * @usb: the zd1211rw-private USB structure * @skb: a &struct sk_buff pointer * * This function tranmits a frame to the device. It doesn't wait for * completion. The frame must contain the control set and have all the * control set information available. * * The function returns 0 if the transfer has been successfully initiated. */ int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb) { int r; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct usb_device *udev = zd_usb_to_usbdev(usb); struct urb *urb; struct zd_usb_tx *tx = &usb->tx; if (!atomic_read(&tx->enabled)) { r = -ENOENT; goto out; } urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { r = -ENOMEM; goto out; } usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT), skb->data, skb->len, tx_urb_complete, skb); info->rate_driver_data[1] = (void *)jiffies; skb_queue_tail(&tx->submitted_skbs, skb); usb_anchor_urb(urb, &tx->submitted); r = usb_submit_urb(urb, GFP_ATOMIC); if (r) { dev_dbg_f(zd_usb_dev(usb), "error submit urb %p %d\n", urb, r); usb_unanchor_urb(urb); skb_unlink(skb, &tx->submitted_skbs); goto error; } tx_inc_submitted_urbs(usb); return 0; error: usb_free_urb(urb); out: return r; } static bool zd_tx_timeout(struct zd_usb *usb) { struct zd_usb_tx *tx = &usb->tx; struct sk_buff_head *q = &tx->submitted_skbs; struct sk_buff *skb, *skbnext; struct ieee80211_tx_info *info; unsigned long flags, trans_start; bool have_timedout = false; spin_lock_irqsave(&q->lock, flags); skb_queue_walk_safe(q, skb, skbnext) { info = IEEE80211_SKB_CB(skb); trans_start = (unsigned long)info->rate_driver_data[1]; if (time_is_before_jiffies(trans_start + ZD_TX_TIMEOUT)) { have_timedout = true; break; } } spin_unlock_irqrestore(&q->lock, flags); return have_timedout; } static void zd_tx_watchdog_handler(struct work_struct *work) { struct zd_usb *usb = container_of(work, struct zd_usb, tx.watchdog_work.work); struct zd_usb_tx *tx = &usb->tx; if (!atomic_read(&tx->enabled) || !tx->watchdog_enabled) goto out; if (!zd_tx_timeout(usb)) goto out; /* TX halted, try reset */ dev_warn(zd_usb_dev(usb), "TX-stall detected, resetting device..."); usb_queue_reset_device(usb->intf); /* reset will stop this worker, don't rearm */ return; out: queue_delayed_work(zd_workqueue, &tx->watchdog_work, ZD_TX_WATCHDOG_INTERVAL); } void zd_tx_watchdog_enable(struct zd_usb *usb) { struct zd_usb_tx *tx = &usb->tx; if (!tx->watchdog_enabled) { dev_dbg_f(zd_usb_dev(usb), "\n"); 
queue_delayed_work(zd_workqueue, &tx->watchdog_work, ZD_TX_WATCHDOG_INTERVAL); tx->watchdog_enabled = 1; } } void zd_tx_watchdog_disable(struct zd_usb *usb) { struct zd_usb_tx *tx = &usb->tx; if (tx->watchdog_enabled) { dev_dbg_f(zd_usb_dev(usb), "\n"); tx->watchdog_enabled = 0; cancel_delayed_work_sync(&tx->watchdog_work); } } static void zd_rx_idle_timer_handler(struct work_struct *work) { struct zd_usb *usb = container_of(work, struct zd_usb, rx.idle_work.work); struct zd_mac *mac = zd_usb_to_mac(usb); if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags)) return; dev_dbg_f(zd_usb_dev(usb), "\n"); /* 30 seconds since last rx, reset rx */ zd_usb_reset_rx(usb); } static void zd_usb_reset_rx_idle_timer_tasklet(unsigned long param) { struct zd_usb *usb = (struct zd_usb *)param; zd_usb_reset_rx_idle_timer(usb); } void zd_usb_reset_rx_idle_timer(struct zd_usb *usb) { struct zd_usb_rx *rx = &usb->rx; mod_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL); } static inline void init_usb_interrupt(struct zd_usb *usb) { struct zd_usb_interrupt *intr = &usb->intr; spin_lock_init(&intr->lock); intr->interval = int_urb_interval(zd_usb_to_usbdev(usb)); init_completion(&intr->read_regs.completion); atomic_set(&intr->read_regs_enabled, 0); intr->read_regs.cr_int_addr = cpu_to_le16((u16)CR_INTERRUPT); } static inline void init_usb_rx(struct zd_usb *usb) { struct zd_usb_rx *rx = &usb->rx; spin_lock_init(&rx->lock); mutex_init(&rx->setup_mutex); if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) { rx->usb_packet_size = 512; } else { rx->usb_packet_size = 64; } ZD_ASSERT(rx->fragment_length == 0); INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler); rx->reset_timer_tasklet.func = zd_usb_reset_rx_idle_timer_tasklet; rx->reset_timer_tasklet.data = (unsigned long)usb; } static inline void init_usb_tx(struct zd_usb *usb) { struct zd_usb_tx *tx = &usb->tx; spin_lock_init(&tx->lock); atomic_set(&tx->enabled, 0); tx->stopped = 0; skb_queue_head_init(&tx->submitted_skbs); init_usb_anchor(&tx->submitted); tx->submitted_urbs = 0; tx->watchdog_enabled = 0; INIT_DELAYED_WORK(&tx->watchdog_work, zd_tx_watchdog_handler); } void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw, struct usb_interface *intf) { memset(usb, 0, sizeof(*usb)); usb->intf = usb_get_intf(intf); usb_set_intfdata(usb->intf, hw); init_usb_anchor(&usb->submitted_cmds); init_usb_interrupt(usb); init_usb_tx(usb); init_usb_rx(usb); } void zd_usb_clear(struct zd_usb *usb) { usb_set_intfdata(usb->intf, NULL); usb_put_intf(usb->intf); ZD_MEMCLEAR(usb, sizeof(*usb)); /* FIXME: usb_interrupt, usb_tx, usb_rx? 
*/ } static const char *speed(enum usb_device_speed speed) { switch (speed) { case USB_SPEED_LOW: return "low"; case USB_SPEED_FULL: return "full"; case USB_SPEED_HIGH: return "high"; default: return "unknown speed"; } } static int scnprint_id(struct usb_device *udev, char *buffer, size_t size) { return scnprintf(buffer, size, "%04hx:%04hx v%04hx %s", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct), get_bcdDevice(udev), speed(udev->speed)); } int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size) { struct usb_device *udev = interface_to_usbdev(usb->intf); return scnprint_id(udev, buffer, size); } #ifdef DEBUG static void print_id(struct usb_device *udev) { char buffer[40]; scnprint_id(udev, buffer, sizeof(buffer)); buffer[sizeof(buffer)-1] = 0; dev_dbg_f(&udev->dev, "%s\n", buffer); } #else #define print_id(udev) do { } while (0) #endif static int eject_installer(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_host_interface *iface_desc = &intf->altsetting[0]; struct usb_endpoint_descriptor *endpoint; unsigned char *cmd; u8 bulk_out_ep; int r; /* Find bulk out endpoint */ for (r = 1; r >= 0; r--) { endpoint = &iface_desc->endpoint[r].desc; if (usb_endpoint_dir_out(endpoint) && usb_endpoint_xfer_bulk(endpoint)) { bulk_out_ep = endpoint->bEndpointAddress; break; } } if (r == -1) { dev_err(&udev->dev, "zd1211rw: Could not find bulk out endpoint\n"); return -ENODEV; } cmd = kzalloc(31, GFP_KERNEL); if (cmd == NULL) return -ENODEV; /* USB bulk command block */ cmd[0] = 0x55; /* bulk command signature */ cmd[1] = 0x53; /* bulk command signature */ cmd[2] = 0x42; /* bulk command signature */ cmd[3] = 0x43; /* bulk command signature */ cmd[14] = 6; /* command length */ cmd[15] = 0x1b; /* SCSI command: START STOP UNIT */ cmd[19] = 0x2; /* eject disc */ dev_info(&udev->dev, "Ejecting virtual installer media...\n"); r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep), cmd, 31, NULL, 2000); kfree(cmd); if (r) return r; /* At this point, the device disconnects and reconnects with the real * ID numbers. */ usb_set_intfdata(intf, NULL); return 0; } int zd_usb_init_hw(struct zd_usb *usb) { int r; struct zd_mac *mac = zd_usb_to_mac(usb); dev_dbg_f(zd_usb_dev(usb), "\n"); r = upload_firmware(usb); if (r) { dev_err(zd_usb_dev(usb), "couldn't load firmware. Error number %d\n", r); return r; } r = usb_reset_configuration(zd_usb_to_usbdev(usb)); if (r) { dev_dbg_f(zd_usb_dev(usb), "couldn't reset configuration. Error number %d\n", r); return r; } r = zd_mac_init_hw(mac->hw); if (r) { dev_dbg_f(zd_usb_dev(usb), "couldn't initialize mac. Error number %d\n", r); return r; } usb->initialized = 1; return 0; } static int probe(struct usb_interface *intf, const struct usb_device_id *id) { int r; struct usb_device *udev = interface_to_usbdev(intf); struct zd_usb *usb; struct ieee80211_hw *hw = NULL; print_id(udev); if (id->driver_info & DEVICE_INSTALLER) return eject_installer(intf); switch (udev->speed) { case USB_SPEED_LOW: case USB_SPEED_FULL: case USB_SPEED_HIGH: break; default: dev_dbg_f(&intf->dev, "Unknown USB speed\n"); r = -ENODEV; goto error; } r = usb_reset_device(udev); if (r) { dev_err(&intf->dev, "couldn't reset usb device. Error number %d\n", r); goto error; } hw = zd_mac_alloc_hw(intf); if (hw == NULL) { r = -ENOMEM; goto error; } usb = &zd_hw_mac(hw)->chip.usb; usb->is_zd1211b = (id->driver_info == DEVICE_ZD1211B) != 0; r = zd_mac_preinit_hw(hw); if (r) { dev_dbg_f(&intf->dev, "couldn't initialize mac. 
Error number %d\n", r); goto error; } r = ieee80211_register_hw(hw); if (r) { dev_dbg_f(&intf->dev, "couldn't register device. Error number %d\n", r); goto error; } dev_dbg_f(&intf->dev, "successful\n"); dev_info(&intf->dev, "%s\n", wiphy_name(hw->wiphy)); return 0; error: usb_reset_device(interface_to_usbdev(intf)); if (hw) { zd_mac_clear(zd_hw_mac(hw)); ieee80211_free_hw(hw); } return r; } static void disconnect(struct usb_interface *intf) { struct ieee80211_hw *hw = zd_intf_to_hw(intf); struct zd_mac *mac; struct zd_usb *usb; /* Either something really bad happened, or we're just dealing with * a DEVICE_INSTALLER. */ if (hw == NULL) return; mac = zd_hw_mac(hw); usb = &mac->chip.usb; dev_dbg_f(zd_usb_dev(usb), "\n"); ieee80211_unregister_hw(hw); /* Just in case something has gone wrong! */ zd_usb_disable_tx(usb); zd_usb_disable_rx(usb); zd_usb_disable_int(usb); /* If the disconnect has been caused by a removal of the * driver module, the reset allows reloading of the driver. If the * reset will not be executed here, the upload of the firmware in the * probe function caused by the reloading of the driver will fail. */ usb_reset_device(interface_to_usbdev(intf)); zd_mac_clear(mac); ieee80211_free_hw(hw); dev_dbg(&intf->dev, "disconnected\n"); } static void zd_usb_resume(struct zd_usb *usb) { struct zd_mac *mac = zd_usb_to_mac(usb); int r; dev_dbg_f(zd_usb_dev(usb), "\n"); r = zd_op_start(zd_usb_to_hw(usb)); if (r < 0) { dev_warn(zd_usb_dev(usb), "Device resume failed " "with error code %d. Retrying...\n", r); if (usb->was_running) set_bit(ZD_DEVICE_RUNNING, &mac->flags); usb_queue_reset_device(usb->intf); return; } if (mac->type != NL80211_IFTYPE_UNSPECIFIED) { r = zd_restore_settings(mac); if (r < 0) { dev_dbg(zd_usb_dev(usb), "failed to restore settings, %d\n", r); return; } } } static void zd_usb_stop(struct zd_usb *usb) { dev_dbg_f(zd_usb_dev(usb), "\n"); zd_op_stop(zd_usb_to_hw(usb)); zd_usb_disable_tx(usb); zd_usb_disable_rx(usb); zd_usb_disable_int(usb); usb->initialized = 0; } static int pre_reset(struct usb_interface *intf) { struct ieee80211_hw *hw = usb_get_intfdata(intf); struct zd_mac *mac; struct zd_usb *usb; if (!hw || intf->condition != USB_INTERFACE_BOUND) return 0; mac = zd_hw_mac(hw); usb = &mac->chip.usb; usb->was_running = test_bit(ZD_DEVICE_RUNNING, &mac->flags); zd_usb_stop(usb); mutex_lock(&mac->chip.mutex); return 0; } static int post_reset(struct usb_interface *intf) { struct ieee80211_hw *hw = usb_get_intfdata(intf); struct zd_mac *mac; struct zd_usb *usb; if (!hw || intf->condition != USB_INTERFACE_BOUND) return 0; mac = zd_hw_mac(hw); usb = &mac->chip.usb; mutex_unlock(&mac->chip.mutex); if (usb->was_running) zd_usb_resume(usb); return 0; } static struct usb_driver driver = { .name = KBUILD_MODNAME, .id_table = usb_ids, .probe = probe, .disconnect = disconnect, .pre_reset = pre_reset, .post_reset = post_reset, .disable_hub_initiated_lpm = 1, }; struct workqueue_struct *zd_workqueue; static int __init usb_init(void) { int r; pr_debug("%s usb_init()\n", driver.name); zd_workqueue = create_singlethread_workqueue(driver.name); if (zd_workqueue == NULL) { printk(KERN_ERR "%s couldn't create workqueue\n", driver.name); return -ENOMEM; } r = usb_register(&driver); if (r) { destroy_workqueue(zd_workqueue); printk(KERN_ERR "%s usb_register() failed. 
Error number %d\n", driver.name, r); return r; } pr_debug("%s initialized\n", driver.name); return 0; } static void __exit usb_exit(void) { pr_debug("%s usb_exit()\n", driver.name); usb_deregister(&driver); destroy_workqueue(zd_workqueue); } module_init(usb_init); module_exit(usb_exit); static int zd_ep_regs_out_msg(struct usb_device *udev, void *data, int len, int *actual_length, int timeout) { /* In USB 2.0 mode EP_REGS_OUT endpoint is interrupt type. However in * USB 1.1 mode endpoint is bulk. Select correct type URB by endpoint * descriptor. */ struct usb_host_endpoint *ep; unsigned int pipe; pipe = usb_sndintpipe(udev, EP_REGS_OUT); ep = usb_pipe_endpoint(udev, pipe); if (!ep) return -EINVAL; if (usb_endpoint_xfer_int(&ep->desc)) { return usb_interrupt_msg(udev, pipe, data, len, actual_length, timeout); } else { pipe = usb_sndbulkpipe(udev, EP_REGS_OUT); return usb_bulk_msg(udev, pipe, data, len, actual_length, timeout); } } static int usb_int_regs_length(unsigned int count) { return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data); } static void prepare_read_regs_int(struct zd_usb *usb, struct usb_req_read_regs *req, unsigned int count) { struct zd_usb_interrupt *intr = &usb->intr; spin_lock_irq(&intr->lock); atomic_set(&intr->read_regs_enabled, 1); intr->read_regs.req = req; intr->read_regs.req_count = count; reinit_completion(&intr->read_regs.completion); spin_unlock_irq(&intr->lock); } static void disable_read_regs_int(struct zd_usb *usb) { struct zd_usb_interrupt *intr = &usb->intr; spin_lock_irq(&intr->lock); atomic_set(&intr->read_regs_enabled, 0); spin_unlock_irq(&intr->lock); } static bool check_read_regs(struct zd_usb *usb, struct usb_req_read_regs *req, unsigned int count) { int i; struct zd_usb_interrupt *intr = &usb->intr; struct read_regs_int *rr = &intr->read_regs; struct usb_int_regs *regs = (struct usb_int_regs *)rr->buffer; /* The created block size seems to be larger than expected. * However results appear to be correct. */ if (rr->length < usb_int_regs_length(count)) { dev_dbg_f(zd_usb_dev(usb), "error: actual length %d less than expected %d\n", rr->length, usb_int_regs_length(count)); return false; } if (rr->length > sizeof(rr->buffer)) { dev_dbg_f(zd_usb_dev(usb), "error: actual length %d exceeds buffer size %zu\n", rr->length, sizeof(rr->buffer)); return false; } for (i = 0; i < count; i++) { struct reg_data *rd = &regs->regs[i]; if (rd->addr != req->addr[i]) { dev_dbg_f(zd_usb_dev(usb), "rd[%d] addr %#06hx expected %#06hx\n", i, le16_to_cpu(rd->addr), le16_to_cpu(req->addr[i])); return false; } } return true; } static int get_results(struct zd_usb *usb, u16 *values, struct usb_req_read_regs *req, unsigned int count, bool *retry) { int r; int i; struct zd_usb_interrupt *intr = &usb->intr; struct read_regs_int *rr = &intr->read_regs; struct usb_int_regs *regs = (struct usb_int_regs *)rr->buffer; spin_lock_irq(&intr->lock); r = -EIO; /* Read failed because firmware bug? 
*/ *retry = !!intr->read_regs_int_overridden; if (*retry) goto error_unlock; if (!check_read_regs(usb, req, count)) { dev_dbg_f(zd_usb_dev(usb), "error: invalid read regs\n"); goto error_unlock; } for (i = 0; i < count; i++) { struct reg_data *rd = &regs->regs[i]; values[i] = le16_to_cpu(rd->value); } r = 0; error_unlock: spin_unlock_irq(&intr->lock); return r; } int zd_usb_ioread16v(struct zd_usb *usb, u16 *values, const zd_addr_t *addresses, unsigned int count) { int r, i, req_len, actual_req_len, try_count = 0; struct usb_device *udev; struct usb_req_read_regs *req = NULL; unsigned long timeout; bool retry = false; if (count < 1) { dev_dbg_f(zd_usb_dev(usb), "error: count is zero\n"); return -EINVAL; } if (count > USB_MAX_IOREAD16_COUNT) { dev_dbg_f(zd_usb_dev(usb), "error: count %u exceeds possible max %u\n", count, USB_MAX_IOREAD16_COUNT); return -EINVAL; } if (in_atomic()) { dev_dbg_f(zd_usb_dev(usb), "error: io in atomic context not supported\n"); return -EWOULDBLOCK; } if (!usb_int_enabled(usb)) { dev_dbg_f(zd_usb_dev(usb), "error: usb interrupt not enabled\n"); return -EWOULDBLOCK; } ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex)); BUILD_BUG_ON(sizeof(struct usb_req_read_regs) + USB_MAX_IOREAD16_COUNT * sizeof(__le16) > sizeof(usb->req_buf)); BUG_ON(sizeof(struct usb_req_read_regs) + count * sizeof(__le16) > sizeof(usb->req_buf)); req_len = sizeof(struct usb_req_read_regs) + count * sizeof(__le16); req = (void *)usb->req_buf; req->id = cpu_to_le16(USB_REQ_READ_REGS); for (i = 0; i < count; i++) req->addr[i] = cpu_to_le16((u16)addresses[i]); retry_read: try_count++; udev = zd_usb_to_usbdev(usb); prepare_read_regs_int(usb, req, count); r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/); if (r) { dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg(). Error number %d\n", r); goto error; } if (req_len != actual_req_len) { dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()\n" " req_len %d != actual_req_len %d\n", req_len, actual_req_len); r = -EIO; goto error; } timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion, msecs_to_jiffies(50)); if (!timeout) { disable_read_regs_int(usb); dev_dbg_f(zd_usb_dev(usb), "read timed out\n"); r = -ETIMEDOUT; goto error; } r = get_results(usb, values, req, count, &retry); if (retry && try_count < 20) { dev_dbg_f(zd_usb_dev(usb), "read retry, tries so far: %d\n", try_count); goto retry_read; } error: return r; } static void iowrite16v_urb_complete(struct urb *urb) { struct zd_usb *usb = urb->context; if (urb->status && !usb->cmd_error) usb->cmd_error = urb->status; if (!usb->cmd_error && urb->actual_length != urb->transfer_buffer_length) usb->cmd_error = -EIO; } static int zd_submit_waiting_urb(struct zd_usb *usb, bool last) { int r = 0; struct urb *urb = usb->urb_async_waiting; if (!urb) return 0; usb->urb_async_waiting = NULL; if (!last) urb->transfer_flags |= URB_NO_INTERRUPT; usb_anchor_urb(urb, &usb->submitted_cmds); r = usb_submit_urb(urb, GFP_KERNEL); if (r) { usb_unanchor_urb(urb); dev_dbg_f(zd_usb_dev(usb), "error in usb_submit_urb(). 
Error number %d\n", r); goto error; } /* fall-through with r == 0 */ error: usb_free_urb(urb); return r; } void zd_usb_iowrite16v_async_start(struct zd_usb *usb) { ZD_ASSERT(usb_anchor_empty(&usb->submitted_cmds)); ZD_ASSERT(usb->urb_async_waiting == NULL); ZD_ASSERT(!usb->in_async); ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex)); usb->in_async = 1; usb->cmd_error = 0; usb->urb_async_waiting = NULL; } int zd_usb_iowrite16v_async_end(struct zd_usb *usb, unsigned int timeout) { int r; ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex)); ZD_ASSERT(usb->in_async); /* Submit last iowrite16v URB */ r = zd_submit_waiting_urb(usb, true); if (r) { dev_dbg_f(zd_usb_dev(usb), "error in zd_submit_waiting_usb(). " "Error number %d\n", r); usb_kill_anchored_urbs(&usb->submitted_cmds); goto error; } if (timeout) timeout = usb_wait_anchor_empty_timeout(&usb->submitted_cmds, timeout); if (!timeout) { usb_kill_anchored_urbs(&usb->submitted_cmds); if (usb->cmd_error == -ENOENT) { dev_dbg_f(zd_usb_dev(usb), "timed out"); r = -ETIMEDOUT; goto error; } } r = usb->cmd_error; error: usb->in_async = 0; return r; } int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, unsigned int count) { int r; struct usb_device *udev; struct usb_req_write_regs *req = NULL; int i, req_len; struct urb *urb; struct usb_host_endpoint *ep; ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex)); ZD_ASSERT(usb->in_async); if (count == 0) return 0; if (count > USB_MAX_IOWRITE16_COUNT) { dev_dbg_f(zd_usb_dev(usb), "error: count %u exceeds possible max %u\n", count, USB_MAX_IOWRITE16_COUNT); return -EINVAL; } if (in_atomic()) { dev_dbg_f(zd_usb_dev(usb), "error: io in atomic context not supported\n"); return -EWOULDBLOCK; } udev = zd_usb_to_usbdev(usb); ep = usb_pipe_endpoint(udev, usb_sndintpipe(udev, EP_REGS_OUT)); if (!ep) return -ENOENT; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; req_len = sizeof(struct usb_req_write_regs) + count * sizeof(struct reg_data); req = kmalloc(req_len, GFP_KERNEL); if (!req) { r = -ENOMEM; goto error; } req->id = cpu_to_le16(USB_REQ_WRITE_REGS); for (i = 0; i < count; i++) { struct reg_data *rw = &req->reg_writes[i]; rw->addr = cpu_to_le16((u16)ioreqs[i].addr); rw->value = cpu_to_le16(ioreqs[i].value); } /* In USB 2.0 mode endpoint is interrupt type. However in USB 1.1 mode * endpoint is bulk. Select correct type URB by endpoint descriptor. */ if (usb_endpoint_xfer_int(&ep->desc)) usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT), req, req_len, iowrite16v_urb_complete, usb, ep->desc.bInterval); else usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_REGS_OUT), req, req_len, iowrite16v_urb_complete, usb); urb->transfer_flags |= URB_FREE_BUFFER; /* Submit previous URB */ r = zd_submit_waiting_urb(usb, false); if (r) { dev_dbg_f(zd_usb_dev(usb), "error in zd_submit_waiting_usb(). " "Error number %d\n", r); goto error; } /* Delay submit so that URB_NO_INTERRUPT flag can be set for all URBs * of currect batch except for very last. 
*/ usb->urb_async_waiting = urb; return 0; error: usb_free_urb(urb); return r; } int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, unsigned int count) { int r; zd_usb_iowrite16v_async_start(usb); r = zd_usb_iowrite16v_async(usb, ioreqs, count); if (r) { zd_usb_iowrite16v_async_end(usb, 0); return r; } return zd_usb_iowrite16v_async_end(usb, 50 /* ms */); } int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits) { int r; struct usb_device *udev; struct usb_req_rfwrite *req = NULL; int i, req_len, actual_req_len; u16 bit_value_template; if (in_atomic()) { dev_dbg_f(zd_usb_dev(usb), "error: io in atomic context not supported\n"); return -EWOULDBLOCK; } if (bits < USB_MIN_RFWRITE_BIT_COUNT) { dev_dbg_f(zd_usb_dev(usb), "error: bits %d are smaller than" " USB_MIN_RFWRITE_BIT_COUNT %d\n", bits, USB_MIN_RFWRITE_BIT_COUNT); return -EINVAL; } if (bits > USB_MAX_RFWRITE_BIT_COUNT) { dev_dbg_f(zd_usb_dev(usb), "error: bits %d exceed USB_MAX_RFWRITE_BIT_COUNT %d\n", bits, USB_MAX_RFWRITE_BIT_COUNT); return -EINVAL; } #ifdef DEBUG if (value & (~0UL << bits)) { dev_dbg_f(zd_usb_dev(usb), "error: value %#09x has bits >= %d set\n", value, bits); return -EINVAL; } #endif /* DEBUG */ dev_dbg_f(zd_usb_dev(usb), "value %#09x bits %d\n", value, bits); r = zd_usb_ioread16(usb, &bit_value_template, ZD_CR203); if (r) { dev_dbg_f(zd_usb_dev(usb), "error %d: Couldn't read ZD_CR203\n", r); return r; } bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA); ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex)); BUILD_BUG_ON(sizeof(struct usb_req_rfwrite) + USB_MAX_RFWRITE_BIT_COUNT * sizeof(__le16) > sizeof(usb->req_buf)); BUG_ON(sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16) > sizeof(usb->req_buf)); req_len = sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16); req = (void *)usb->req_buf; req->id = cpu_to_le16(USB_REQ_WRITE_RF); /* 1: 3683a, but not used in ZYDAS driver */ req->value = cpu_to_le16(2); req->bits = cpu_to_le16(bits); for (i = 0; i < bits; i++) { u16 bv = bit_value_template; if (value & (1 << (bits-1-i))) bv |= RF_DATA; req->bit_values[i] = cpu_to_le16(bv); } udev = zd_usb_to_usbdev(usb); r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/); if (r) { dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg(). Error number %d\n", r); goto out; } if (req_len != actual_req_len) { dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()" " req_len %d != actual_req_len %d\n", req_len, actual_req_len); r = -EIO; goto out; } /* FALL-THROUGH with r == 0 */ out: return r; }
null
null
null
null
94,506
532
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
153,589
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/* * Copyright (c) Markus Schmidt and Christian Holschuh * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/opt.h" #include "avfilter.h" #include "internal.h" #include "audio.h" typedef struct LFOContext { double freq; double offset; int srate; double amount; double pwidth; double phase; } LFOContext; typedef struct SRContext { double target; double real; double samples; double last; } SRContext; typedef struct ACrusherContext { const AVClass *class; double level_in; double level_out; double bits; double mix; int mode; double dc; double idc; double aa; double samples; int is_lfo; double lforange; double lforate; double sqr; double aa1; double coeff; int round; double sov; double smin; double sdiff; LFOContext lfo; SRContext *sr; } ACrusherContext; #define OFFSET(x) offsetof(ACrusherContext, x) #define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM static const AVOption acrusher_options[] = { { "level_in", "set level in", OFFSET(level_in), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A }, { "level_out","set level out", OFFSET(level_out), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A }, { "bits", "set bit reduction", OFFSET(bits), AV_OPT_TYPE_DOUBLE, {.dbl=8}, 1, 64, A }, { "mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=.5}, 0, 1, A }, { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, A, "mode" }, { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A, "mode" }, { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A, "mode" }, { "dc", "set DC", OFFSET(dc), AV_OPT_TYPE_DOUBLE, {.dbl=1}, .25, 4, A }, { "aa", "set anti-aliasing", OFFSET(aa), AV_OPT_TYPE_DOUBLE, {.dbl=.5}, 0, 1, A }, { "samples", "set sample reduction", OFFSET(samples), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 1, 250, A }, { "lfo", "enable LFO", OFFSET(is_lfo), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, A }, { "lforange", "set LFO depth", OFFSET(lforange), AV_OPT_TYPE_DOUBLE, {.dbl=20}, 1, 250, A }, { "lforate", "set LFO rate", OFFSET(lforate), AV_OPT_TYPE_DOUBLE, {.dbl=.3}, .01, 200, A }, { NULL } }; AVFILTER_DEFINE_CLASS(acrusher); static double samplereduction(ACrusherContext *s, SRContext *sr, double in) { sr->samples++; if (sr->samples >= s->round) { sr->target += s->samples; sr->real += s->round; if (sr->target + s->samples >= sr->real + 1) { sr->last = in; sr->target = 0; sr->real = 0; } sr->samples = 0; } return sr->last; } static double add_dc(double s, double dc, double idc) { return s > 0 ? s * dc : s * idc; } static double remove_dc(double s, double dc, double idc) { return s > 0 ? 
s * idc : s * dc; } static inline double factor(double y, double k, double aa1, double aa) { return 0.5 * (sin(M_PI * (fabs(y - k) - aa1) / aa - M_PI_2) + 1); } static double bitreduction(ACrusherContext *s, double in) { const double sqr = s->sqr; const double coeff = s->coeff; const double aa = s->aa; const double aa1 = s->aa1; double y, k; // add dc in = add_dc(in, s->dc, s->idc); // main rounding calculation depending on mode // the idea for anti-aliasing: // you need a function f which brings you to the scale, where // you want to round and the function f_b (with f(f_b)=id) which // brings you back to your original scale. // // then you can use the logic below in the following way: // y = f(in) and k = roundf(y) // if (y > k + aa1) // k = f_b(k) + ( f_b(k+1) - f_b(k) ) * 0.5 * (sin(x - PI/2) + 1) // if (y < k + aa1) // k = f_b(k) - ( f_b(k+1) - f_b(k) ) * 0.5 * (sin(x - PI/2) + 1) // // whereas x = (fabs(f(in) - k) - aa1) * PI / aa // for both cases. switch (s->mode) { case 0: default: // linear y = in * coeff; k = roundf(y); if (k - aa1 <= y && y <= k + aa1) { k /= coeff; } else if (y > k + aa1) { k = k / coeff + ((k + 1) / coeff - k / coeff) * factor(y, k, aa1, aa); } else { k = k / coeff - (k / coeff - (k - 1) / coeff) * factor(y, k, aa1, aa); } break; case 1: // logarithmic y = sqr * log(fabs(in)) + sqr * sqr; k = roundf(y); if(!in) { k = 0; } else if (k - aa1 <= y && y <= k + aa1) { k = in / fabs(in) * exp(k / sqr - sqr); } else if (y > k + aa1) { double x = exp(k / sqr - sqr); k = FFSIGN(in) * (x + (exp((k + 1) / sqr - sqr) - x) * factor(y, k, aa1, aa)); } else { double x = exp(k / sqr - sqr); k = in / fabs(in) * (x - (x - exp((k - 1) / sqr - sqr)) * factor(y, k, aa1, aa)); } break; } // mix between dry and wet signal k += (in - k) * s->mix; // remove dc k = remove_dc(k, s->dc, s->idc); return k; } static double lfo_get(LFOContext *lfo) { double phs = FFMIN(100., lfo->phase / FFMIN(1.99, FFMAX(0.01, lfo->pwidth)) + lfo->offset); double val; if (phs > 1) phs = fmod(phs, 1.); val = sin((phs * 360.) * M_PI / 180); return val * lfo->amount; } static void lfo_advance(LFOContext *lfo, unsigned count) { lfo->phase = fabs(lfo->phase + count * lfo->freq * (1. / lfo->srate)); if (lfo->phase >= 1.) lfo->phase = fmod(lfo->phase, 1.); } static int filter_frame(AVFilterLink *inlink, AVFrame *in) { AVFilterContext *ctx = inlink->dst; ACrusherContext *s = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; AVFrame *out; const double *src = (const double *)in->data[0]; double *dst; const double level_in = s->level_in; const double level_out = s->level_out; const double mix = s->mix; int n, c; if (av_frame_is_writable(in)) { out = in; } else { out = ff_get_audio_buffer(inlink, in->nb_samples); if (!out) { av_frame_free(&in); return AVERROR(ENOMEM); } av_frame_copy_props(out, in); } dst = (double *)out->data[0]; for (n = 0; n < in->nb_samples; n++) { if (s->is_lfo) { s->samples = s->smin + s->sdiff * (lfo_get(&s->lfo) + 0.5); s->round = round(s->samples); } for (c = 0; c < inlink->channels; c++) { double sample = src[c] * level_in; sample = mix * samplereduction(s, &s->sr[c], sample) + src[c] * (1. 
- mix) * level_in; dst[c] = bitreduction(s, sample) * level_out; } src += c; dst += c; if (s->is_lfo) lfo_advance(&s->lfo, 1); } if (in != out) av_frame_free(&in); return ff_filter_frame(outlink, out); } static int query_formats(AVFilterContext *ctx) { AVFilterFormats *formats; AVFilterChannelLayouts *layouts; static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_NONE }; int ret; layouts = ff_all_channel_counts(); if (!layouts) return AVERROR(ENOMEM); ret = ff_set_common_channel_layouts(ctx, layouts); if (ret < 0) return ret; formats = ff_make_format_list(sample_fmts); if (!formats) return AVERROR(ENOMEM); ret = ff_set_common_formats(ctx, formats); if (ret < 0) return ret; formats = ff_all_samplerates(); if (!formats) return AVERROR(ENOMEM); return ff_set_common_samplerates(ctx, formats); } static av_cold void uninit(AVFilterContext *ctx) { ACrusherContext *s = ctx->priv; av_freep(&s->sr); } static int config_input(AVFilterLink *inlink) { AVFilterContext *ctx = inlink->dst; ACrusherContext *s = ctx->priv; double rad, sunder, smax, sover; s->idc = 1. / s->dc; s->coeff = exp2(s->bits) - 1; s->sqr = sqrt(s->coeff / 2); s->aa1 = (1. - s->aa) / 2.; s->round = round(s->samples); rad = s->lforange / 2.; s->smin = FFMAX(s->samples - rad, 1.); sunder = s->samples - rad - s->smin; smax = FFMIN(s->samples + rad, 250.); sover = s->samples + rad - smax; smax -= sunder; s->smin -= sover; s->sdiff = smax - s->smin; s->lfo.freq = s->lforate; s->lfo.pwidth = 1.; s->lfo.srate = inlink->sample_rate; s->lfo.amount = .5; s->sr = av_calloc(inlink->channels, sizeof(*s->sr)); if (!s->sr) return AVERROR(ENOMEM); return 0; } static const AVFilterPad avfilter_af_acrusher_inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_AUDIO, .config_props = config_input, .filter_frame = filter_frame, }, { NULL } }; static const AVFilterPad avfilter_af_acrusher_outputs[] = { { .name = "default", .type = AVMEDIA_TYPE_AUDIO, }, { NULL } }; AVFilter ff_af_acrusher = { .name = "acrusher", .description = NULL_IF_CONFIG_SMALL("Reduce audio bit resolution."), .priv_size = sizeof(ACrusherContext), .priv_class = &acrusher_class, .uninit = uninit, .query_formats = query_formats, .inputs = avfilter_af_acrusher_inputs, .outputs = avfilter_af_acrusher_outputs, };
null
null
null
null
69,644
31,034
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
31,034
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "third_party/blink/renderer/bindings/modules/v8/serialization/v8_script_value_serializer_for_modules.h" #include "build/build_config.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" #include "third_party/blink/public/platform/platform.h" #include "third_party/blink/public/platform/scheduler/test/renderer_scheduler_test_support.h" #include "third_party/blink/public/platform/web_crypto_algorithm_params.h" #include "third_party/blink/public/platform/web_rtc_certificate_generator.h" #include "third_party/blink/renderer/bindings/core/v8/exception_state.h" #include "third_party/blink/renderer/bindings/core/v8/to_v8_for_core.h" #include "third_party/blink/renderer/bindings/core/v8/v8_array_buffer.h" #include "third_party/blink/renderer/bindings/core/v8/v8_binding_for_testing.h" #include "third_party/blink/renderer/bindings/core/v8/v8_dom_exception.h" #include "third_party/blink/renderer/bindings/modules/v8/serialization/v8_script_value_deserializer_for_modules.h" #include "third_party/blink/renderer/bindings/modules/v8/v8_crypto_key.h" #include "third_party/blink/renderer/bindings/modules/v8/v8_dom_file_system.h" #include "third_party/blink/renderer/bindings/modules/v8/v8_rtc_certificate.h" #include "third_party/blink/renderer/core/typed_arrays/dom_array_buffer.h" #include "third_party/blink/renderer/modules/crypto/crypto_result_impl.h" #include "third_party/blink/renderer/modules/filesystem/dom_file_system.h" #include "third_party/blink/renderer/modules/peerconnection/rtc_certificate.h" #include "third_party/blink/renderer/platform/testing/unit_test_helpers.h" using testing::ElementsAre; using testing::ElementsAreArray; using testing::UnorderedElementsAre; namespace blink { namespace { v8::Local<v8::Value> RoundTripForModules(v8::Local<v8::Value> value, V8TestingScope& scope) { scoped_refptr<ScriptState> script_state = scope.GetScriptState(); ExceptionState& exception_state = scope.GetExceptionState(); scoped_refptr<SerializedScriptValue> serialized_script_value = V8ScriptValueSerializerForModules( script_state, V8ScriptValueSerializerForModules::Options()) .Serialize(value, exception_state); DCHECK_EQ(!serialized_script_value, exception_state.HadException()); EXPECT_TRUE(serialized_script_value); if (!serialized_script_value) return v8::Local<v8::Value>(); return V8ScriptValueDeserializerForModules(script_state, serialized_script_value) .Deserialize(); } // Checks for a DOM exception, including a rethrown one. 
testing::AssertionResult HadDOMExceptionInModulesTest( const StringView& name, ScriptState* script_state, ExceptionState& exception_state) { if (!exception_state.HadException()) return testing::AssertionFailure() << "no exception thrown"; DOMException* dom_exception = V8DOMException::ToImplWithTypeCheck( script_state->GetIsolate(), exception_state.GetException()); if (!dom_exception) { return testing::AssertionFailure() << "exception thrown was not a DOMException"; } if (dom_exception->name() != name) return testing::AssertionFailure() << "was " << dom_exception->name(); return testing::AssertionSuccess(); } static const char kEcdsaPrivateKey[] = "-----BEGIN PRIVATE KEY-----\n" "MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQghHwQ1xYtCoEhFk7r\n" "92u3ozy/MFR4I+9FiN8RYv5J96GhRANCAATLfi7OZLD9sIe5UMfMQnHQgAFaQD8h\n" "/cy6tB8wXZcixp7bZDp5t0GCDHqAUZT3Sa/NHaCelmmgPp3zW3lszXKP\n" "-----END PRIVATE KEY-----\n"; static const char kEcdsaCertificate[] = "-----BEGIN CERTIFICATE-----\n" "MIIBFjCBvaADAgECAgkApnGS+DzNWkUwCgYIKoZIzj0EAwIwETEPMA0GA1UEAwwG\n" "V2ViUlRDMB4XDTE2MDkxNTE4MDcxMloXDTE2MTAxNjE4MDcxMlowETEPMA0GA1UE\n" "AwwGV2ViUlRDMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEy34uzmSw/bCHuVDH\n" "zEJx0IABWkA/If3MurQfMF2XIsae22Q6ebdBggx6gFGU90mvzR2gnpZpoD6d81t5\n" "bM1yjzAKBggqhkjOPQQDAgNIADBFAiBcTOyiexG0QHa5WhJuGtY6FhVZ5GyBMW+7\n" "LkH2QmxICwIhAJCujozN3gjIu7NMxSXuTqueuVz58SefCMA7/vj1TgfV\n" "-----END CERTIFICATE-----\n"; static const uint8_t kEcdsaCertificateEncoded[] = { 0xff, 0x09, 0x3f, 0x00, 0x6b, 0xf1, 0x01, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x20, 0x50, 0x52, 0x49, 0x56, 0x41, 0x54, 0x45, 0x20, 0x4b, 0x45, 0x59, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a, 0x4d, 0x49, 0x47, 0x48, 0x41, 0x67, 0x45, 0x41, 0x4d, 0x42, 0x4d, 0x47, 0x42, 0x79, 0x71, 0x47, 0x53, 0x4d, 0x34, 0x39, 0x41, 0x67, 0x45, 0x47, 0x43, 0x43, 0x71, 0x47, 0x53, 0x4d, 0x34, 0x39, 0x41, 0x77, 0x45, 0x48, 0x42, 0x47, 0x30, 0x77, 0x61, 0x77, 0x49, 0x42, 0x41, 0x51, 0x51, 0x67, 0x68, 0x48, 0x77, 0x51, 0x31, 0x78, 0x59, 0x74, 0x43, 0x6f, 0x45, 0x68, 0x46, 0x6b, 0x37, 0x72, 0x0a, 0x39, 0x32, 0x75, 0x33, 0x6f, 0x7a, 0x79, 0x2f, 0x4d, 0x46, 0x52, 0x34, 0x49, 0x2b, 0x39, 0x46, 0x69, 0x4e, 0x38, 0x52, 0x59, 0x76, 0x35, 0x4a, 0x39, 0x36, 0x47, 0x68, 0x52, 0x41, 0x4e, 0x43, 0x41, 0x41, 0x54, 0x4c, 0x66, 0x69, 0x37, 0x4f, 0x5a, 0x4c, 0x44, 0x39, 0x73, 0x49, 0x65, 0x35, 0x55, 0x4d, 0x66, 0x4d, 0x51, 0x6e, 0x48, 0x51, 0x67, 0x41, 0x46, 0x61, 0x51, 0x44, 0x38, 0x68, 0x0a, 0x2f, 0x63, 0x79, 0x36, 0x74, 0x42, 0x38, 0x77, 0x58, 0x5a, 0x63, 0x69, 0x78, 0x70, 0x37, 0x62, 0x5a, 0x44, 0x70, 0x35, 0x74, 0x30, 0x47, 0x43, 0x44, 0x48, 0x71, 0x41, 0x55, 0x5a, 0x54, 0x33, 0x53, 0x61, 0x2f, 0x4e, 0x48, 0x61, 0x43, 0x65, 0x6c, 0x6d, 0x6d, 0x67, 0x50, 0x70, 0x33, 0x7a, 0x57, 0x33, 0x6c, 0x73, 0x7a, 0x58, 0x4b, 0x50, 0x0a, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x45, 0x4e, 0x44, 0x20, 0x50, 0x52, 0x49, 0x56, 0x41, 0x54, 0x45, 0x20, 0x4b, 0x45, 0x59, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a, 0xb4, 0x03, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x20, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a, 0x4d, 0x49, 0x49, 0x42, 0x46, 0x6a, 0x43, 0x42, 0x76, 0x61, 0x41, 0x44, 0x41, 0x67, 0x45, 0x43, 0x41, 0x67, 0x6b, 0x41, 0x70, 0x6e, 0x47, 0x53, 0x2b, 0x44, 0x7a, 0x4e, 0x57, 0x6b, 0x55, 0x77, 0x43, 0x67, 0x59, 0x49, 0x4b, 0x6f, 0x5a, 0x49, 0x7a, 0x6a, 0x30, 0x45, 0x41, 0x77, 0x49, 0x77, 0x45, 0x54, 0x45, 0x50, 0x4d, 0x41, 0x30, 0x47, 0x41, 0x31, 0x55, 0x45, 0x41, 0x77, 0x77, 0x47, 0x0a, 0x56, 0x32, 0x56, 
0x69, 0x55, 0x6c, 0x52, 0x44, 0x4d, 0x42, 0x34, 0x58, 0x44, 0x54, 0x45, 0x32, 0x4d, 0x44, 0x6b, 0x78, 0x4e, 0x54, 0x45, 0x34, 0x4d, 0x44, 0x63, 0x78, 0x4d, 0x6c, 0x6f, 0x58, 0x44, 0x54, 0x45, 0x32, 0x4d, 0x54, 0x41, 0x78, 0x4e, 0x6a, 0x45, 0x34, 0x4d, 0x44, 0x63, 0x78, 0x4d, 0x6c, 0x6f, 0x77, 0x45, 0x54, 0x45, 0x50, 0x4d, 0x41, 0x30, 0x47, 0x41, 0x31, 0x55, 0x45, 0x0a, 0x41, 0x77, 0x77, 0x47, 0x56, 0x32, 0x56, 0x69, 0x55, 0x6c, 0x52, 0x44, 0x4d, 0x46, 0x6b, 0x77, 0x45, 0x77, 0x59, 0x48, 0x4b, 0x6f, 0x5a, 0x49, 0x7a, 0x6a, 0x30, 0x43, 0x41, 0x51, 0x59, 0x49, 0x4b, 0x6f, 0x5a, 0x49, 0x7a, 0x6a, 0x30, 0x44, 0x41, 0x51, 0x63, 0x44, 0x51, 0x67, 0x41, 0x45, 0x79, 0x33, 0x34, 0x75, 0x7a, 0x6d, 0x53, 0x77, 0x2f, 0x62, 0x43, 0x48, 0x75, 0x56, 0x44, 0x48, 0x0a, 0x7a, 0x45, 0x4a, 0x78, 0x30, 0x49, 0x41, 0x42, 0x57, 0x6b, 0x41, 0x2f, 0x49, 0x66, 0x33, 0x4d, 0x75, 0x72, 0x51, 0x66, 0x4d, 0x46, 0x32, 0x58, 0x49, 0x73, 0x61, 0x65, 0x32, 0x32, 0x51, 0x36, 0x65, 0x62, 0x64, 0x42, 0x67, 0x67, 0x78, 0x36, 0x67, 0x46, 0x47, 0x55, 0x39, 0x30, 0x6d, 0x76, 0x7a, 0x52, 0x32, 0x67, 0x6e, 0x70, 0x5a, 0x70, 0x6f, 0x44, 0x36, 0x64, 0x38, 0x31, 0x74, 0x35, 0x0a, 0x62, 0x4d, 0x31, 0x79, 0x6a, 0x7a, 0x41, 0x4b, 0x42, 0x67, 0x67, 0x71, 0x68, 0x6b, 0x6a, 0x4f, 0x50, 0x51, 0x51, 0x44, 0x41, 0x67, 0x4e, 0x49, 0x41, 0x44, 0x42, 0x46, 0x41, 0x69, 0x42, 0x63, 0x54, 0x4f, 0x79, 0x69, 0x65, 0x78, 0x47, 0x30, 0x51, 0x48, 0x61, 0x35, 0x57, 0x68, 0x4a, 0x75, 0x47, 0x74, 0x59, 0x36, 0x46, 0x68, 0x56, 0x5a, 0x35, 0x47, 0x79, 0x42, 0x4d, 0x57, 0x2b, 0x37, 0x0a, 0x4c, 0x6b, 0x48, 0x32, 0x51, 0x6d, 0x78, 0x49, 0x43, 0x77, 0x49, 0x68, 0x41, 0x4a, 0x43, 0x75, 0x6a, 0x6f, 0x7a, 0x4e, 0x33, 0x67, 0x6a, 0x49, 0x75, 0x37, 0x4e, 0x4d, 0x78, 0x53, 0x58, 0x75, 0x54, 0x71, 0x75, 0x65, 0x75, 0x56, 0x7a, 0x35, 0x38, 0x53, 0x65, 0x66, 0x43, 0x4d, 0x41, 0x37, 0x2f, 0x76, 0x6a, 0x31, 0x54, 0x67, 0x66, 0x56, 0x0a, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x45, 0x4e, 0x44, 0x20, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a}; TEST(V8ScriptValueSerializerForModulesTest, RoundTripRTCCertificate) { // If WebRTC is not supported in this build, this test is meaningless. std::unique_ptr<WebRTCCertificateGenerator> certificate_generator( Platform::Current()->CreateRTCCertificateGenerator()); if (!certificate_generator) return; V8TestingScope scope; // Make a certificate with the existing key above. std::unique_ptr<WebRTCCertificate> web_certificate = certificate_generator->FromPEM( WebString::FromUTF8(kEcdsaPrivateKey, sizeof(kEcdsaPrivateKey)), WebString::FromUTF8(kEcdsaCertificate, sizeof(kEcdsaCertificate))); ASSERT_TRUE(web_certificate); RTCCertificate* certificate = new RTCCertificate(std::move(web_certificate)); // Round trip test. v8::Local<v8::Value> wrapper = ToV8(certificate, scope.GetContext()->Global(), scope.GetIsolate()); v8::Local<v8::Value> result = RoundTripForModules(wrapper, scope); ASSERT_TRUE(V8RTCCertificate::hasInstance(result, scope.GetIsolate())); RTCCertificate* new_certificate = V8RTCCertificate::ToImpl(result.As<v8::Object>()); WebRTCCertificatePEM pem = new_certificate->Certificate().ToPEM(); EXPECT_EQ(kEcdsaPrivateKey, pem.PrivateKey()); EXPECT_EQ(kEcdsaCertificate, pem.Certificate()); } TEST(V8ScriptValueSerializerForModulesTest, DecodeRTCCertificate) { // If WebRTC is not supported in this build, this test is meaningless. 
std::unique_ptr<WebRTCCertificateGenerator> certificate_generator( Platform::Current()->CreateRTCCertificateGenerator()); if (!certificate_generator) return; V8TestingScope scope; // This is encoded data generated from Chromium (around M55). ScriptState* script_state = scope.GetScriptState(); Vector<uint8_t> encoded_data; encoded_data.Append(kEcdsaCertificateEncoded, sizeof(kEcdsaCertificateEncoded)); scoped_refptr<SerializedScriptValue> input = SerializedValue(encoded_data); // Decode test. v8::Local<v8::Value> result = V8ScriptValueDeserializerForModules(script_state, input).Deserialize(); ASSERT_TRUE(V8RTCCertificate::hasInstance(result, scope.GetIsolate())); RTCCertificate* new_certificate = V8RTCCertificate::ToImpl(result.As<v8::Object>()); WebRTCCertificatePEM pem = new_certificate->Certificate().ToPEM(); EXPECT_EQ(kEcdsaPrivateKey, pem.PrivateKey()); EXPECT_EQ(kEcdsaCertificate, pem.Certificate()); } TEST(V8ScriptValueSerializerForModulesTest, DecodeInvalidRTCCertificate) { V8TestingScope scope; // This is valid, except that "private" is not a valid private key PEM and // "certificate" is not a valid certificate PEM. This checks what happens if // these fail validation inside WebRTC. ScriptState* script_state = scope.GetScriptState(); scoped_refptr<SerializedScriptValue> input = SerializedValue( {0xff, 0x09, 0x3f, 0x00, 0x6b, 0x07, 'p', 'r', 'i', 'v', 'a', 't', 'e', 0x0b, 'c', 'e', 'r', 't', 'i', 'f', 'i', 'c', 'a', 't', 'e', 0x00}); // Decode test. v8::Local<v8::Value> result = V8ScriptValueDeserializerForModules(script_state, input).Deserialize(); EXPECT_TRUE(result->IsNull()); } // A bunch of voodoo which allows the asynchronous WebCrypto operations to be // called synchronously, with the resulting JavaScript values extracted. using CryptoKeyPair = std::pair<CryptoKey*, CryptoKey*>; template <typename T> T ConvertCryptoResult(const ScriptValue&); template <> CryptoKey* ConvertCryptoResult<CryptoKey*>(const ScriptValue& value) { return V8CryptoKey::ToImplWithTypeCheck(value.GetIsolate(), value.V8Value()); } template <> CryptoKeyPair ConvertCryptoResult<CryptoKeyPair>(const ScriptValue& value) { NonThrowableExceptionState exception_state; Dictionary dictionary(value.GetIsolate(), value.V8Value(), exception_state); v8::Local<v8::Value> private_key, public_key; EXPECT_TRUE(dictionary.Get("publicKey", public_key)); EXPECT_TRUE(dictionary.Get("privateKey", private_key)); return std::make_pair( V8CryptoKey::ToImplWithTypeCheck(value.GetIsolate(), public_key), V8CryptoKey::ToImplWithTypeCheck(value.GetIsolate(), private_key)); } template <> DOMException* ConvertCryptoResult<DOMException*>(const ScriptValue& value) { return V8DOMException::ToImplWithTypeCheck(value.GetIsolate(), value.V8Value()); } template <> WebVector<unsigned char> ConvertCryptoResult<WebVector<unsigned char>>( const ScriptValue& value) { WebVector<unsigned char> vector; if (DOMArrayBuffer* buffer = V8ArrayBuffer::ToImplWithTypeCheck( value.GetIsolate(), value.V8Value())) { vector.Assign(reinterpret_cast<const unsigned char*>(buffer->Data()), buffer->ByteLength()); } return vector; } template <> bool ConvertCryptoResult<bool>(const ScriptValue& value) { return value.V8Value()->IsTrue(); } template <typename T> class WebCryptoResultAdapter : public ScriptFunction { private: WebCryptoResultAdapter(ScriptState* script_state, base::RepeatingCallback<void(T)> function) : ScriptFunction(script_state), function_(std::move(function)) {} ScriptValue Call(ScriptValue value) final { function_.Run(ConvertCryptoResult<T>(value)); 
return ScriptValue::From(GetScriptState(), ToV8UndefinedGenerator()); } base::RepeatingCallback<void(T)> function_; template <typename U> friend WebCryptoResult ToWebCryptoResult(ScriptState*, base::RepeatingCallback<void(U)>); }; template <typename T> WebCryptoResult ToWebCryptoResult(ScriptState* script_state, base::RepeatingCallback<void(T)> function) { CryptoResultImpl* result = CryptoResultImpl::Create(script_state); result->Promise().Then( (new WebCryptoResultAdapter<T>(script_state, std::move(function))) ->BindToV8Function(), (new WebCryptoResultAdapter<DOMException*>( script_state, WTF::BindRepeating([](DOMException* exception) { CHECK(false) << "crypto operation failed"; }))) ->BindToV8Function()); return result->Result(); } template <typename T, typename PMF, typename... Args> T SubtleCryptoSync(ScriptState* script_state, PMF func, Args&&... args) { T result; (Platform::Current()->Crypto()->*func)( std::forward<Args>(args)..., ToWebCryptoResult(script_state, WTF::BindRepeating( [](T* out, T result) { *out = result; test::ExitRunLoop(); }, WTF::Unretained(&result))), scheduler::GetSingleThreadTaskRunnerForTesting()); test::EnterRunLoop(); return result; } CryptoKey* SyncGenerateKey(ScriptState* script_state, const WebCryptoAlgorithm& algorithm, bool extractable, WebCryptoKeyUsageMask usages) { return SubtleCryptoSync<CryptoKey*>(script_state, &WebCrypto::GenerateKey, algorithm, extractable, usages); } CryptoKeyPair SyncGenerateKeyPair(ScriptState* script_state, const WebCryptoAlgorithm& algorithm, bool extractable, WebCryptoKeyUsageMask usages) { return SubtleCryptoSync<CryptoKeyPair>(script_state, &WebCrypto::GenerateKey, algorithm, extractable, usages); } CryptoKey* SyncImportKey(ScriptState* script_state, WebCryptoKeyFormat format, WebVector<unsigned char> data, const WebCryptoAlgorithm& algorithm, bool extractable, WebCryptoKeyUsageMask usages) { return SubtleCryptoSync<CryptoKey*>(script_state, &WebCrypto::ImportKey, format, data, algorithm, extractable, usages); } WebVector<uint8_t> SyncExportKey(ScriptState* script_state, WebCryptoKeyFormat format, const WebCryptoKey& key) { return SubtleCryptoSync<WebVector<uint8_t>>( script_state, &WebCrypto::ExportKey, format, key); } WebVector<uint8_t> SyncEncrypt(ScriptState* script_state, const WebCryptoAlgorithm& algorithm, const WebCryptoKey& key, WebVector<unsigned char> data) { return SubtleCryptoSync<WebVector<uint8_t>>(script_state, &WebCrypto::Encrypt, algorithm, key, data); } WebVector<uint8_t> SyncDecrypt(ScriptState* script_state, const WebCryptoAlgorithm& algorithm, const WebCryptoKey& key, WebVector<unsigned char> data) { return SubtleCryptoSync<WebVector<uint8_t>>(script_state, &WebCrypto::Decrypt, algorithm, key, data); } WebVector<uint8_t> SyncSign(ScriptState* script_state, const WebCryptoAlgorithm& algorithm, const WebCryptoKey& key, WebVector<unsigned char> message) { return SubtleCryptoSync<WebVector<uint8_t>>(script_state, &WebCrypto::Sign, algorithm, key, message); } bool SyncVerifySignature(ScriptState* script_state, const WebCryptoAlgorithm& algorithm, const WebCryptoKey& key, WebVector<unsigned char> signature, WebVector<unsigned char> message) { return SubtleCryptoSync<bool>(script_state, &WebCrypto::VerifySignature, algorithm, key, signature, message); } WebVector<uint8_t> SyncDeriveBits(ScriptState* script_state, const WebCryptoAlgorithm& algorithm, const WebCryptoKey& key, unsigned length) { return SubtleCryptoSync<WebVector<uint8_t>>( script_state, &WebCrypto::DeriveBits, algorithm, key, length); } // 
AES-128-CBC uses AES key params. TEST(V8ScriptValueSerializerForModulesTest, RoundTripCryptoKeyAES) { V8TestingScope scope; ScriptState* script_state = scope.GetScriptState(); // Generate a 128-bit AES key. std::unique_ptr<WebCryptoAlgorithmParams> params( new WebCryptoAesKeyGenParams(128)); WebCryptoAlgorithm algorithm(kWebCryptoAlgorithmIdAesCbc, std::move(params)); CryptoKey* key = SyncGenerateKey(script_state, algorithm, true, kWebCryptoKeyUsageEncrypt | kWebCryptoKeyUsageDecrypt); // Round trip it and check the visible attributes. v8::Local<v8::Value> wrapper = ToV8(key, scope.GetScriptState()); v8::Local<v8::Value> result = RoundTripForModules(wrapper, scope); ASSERT_TRUE(V8CryptoKey::hasInstance(result, scope.GetIsolate())); CryptoKey* new_key = V8CryptoKey::ToImpl(result.As<v8::Object>()); EXPECT_EQ("secret", new_key->type()); EXPECT_TRUE(new_key->extractable()); EXPECT_EQ(kWebCryptoKeyUsageEncrypt | kWebCryptoKeyUsageDecrypt, new_key->Key().Usages()); // Check that the keys have the same raw representation. WebVector<uint8_t> key_raw = SyncExportKey(script_state, kWebCryptoKeyFormatRaw, key->Key()); WebVector<uint8_t> new_key_raw = SyncExportKey(script_state, kWebCryptoKeyFormatRaw, new_key->Key()); EXPECT_THAT(new_key_raw, ElementsAreArray(key_raw)); // Check that one can decrypt data encrypted with the other. Vector<unsigned char> iv(16, 0); WebCryptoAlgorithm encrypt_algorithm( kWebCryptoAlgorithmIdAesCbc, std::make_unique<WebCryptoAesCbcParams>(iv)); Vector<unsigned char> plaintext{1, 2, 3}; WebVector<uint8_t> ciphertext = SyncEncrypt(script_state, encrypt_algorithm, key->Key(), plaintext); WebVector<uint8_t> new_plaintext = SyncDecrypt(script_state, encrypt_algorithm, new_key->Key(), ciphertext); EXPECT_THAT(new_plaintext, ElementsAre(1, 2, 3)); } TEST(V8ScriptValueSerializerForModulesTest, DecodeCryptoKeyAES) { V8TestingScope scope; ScriptState* script_state = scope.GetScriptState(); // Decode a 128-bit AES key (non-extractable, decrypt only). scoped_refptr<SerializedScriptValue> input = SerializedValue({0xff, 0x09, 0x3f, 0x00, 0x4b, 0x01, 0x01, 0x10, 0x04, 0x10, 0x7e, 0x25, 0xb2, 0xe8, 0x62, 0x3e, 0xd7, 0x83, 0x70, 0xa2, 0xae, 0x98, 0x79, 0x1b, 0xc5, 0xf7}); v8::Local<v8::Value> result = V8ScriptValueDeserializerForModules(script_state, input).Deserialize(); ASSERT_TRUE(V8CryptoKey::hasInstance(result, scope.GetIsolate())); CryptoKey* new_key = V8CryptoKey::ToImpl(result.As<v8::Object>()); EXPECT_EQ("secret", new_key->type()); EXPECT_FALSE(new_key->extractable()); EXPECT_EQ(kWebCryptoKeyUsageDecrypt, new_key->Key().Usages()); // Check that it can successfully decrypt data. Vector<uint8_t> iv(16, 0); Vector<uint8_t> ciphertext{0x33, 0x26, 0xe7, 0x64, 0x11, 0x5e, 0xf4, 0x60, 0x96, 0x08, 0x11, 0xaf, 0x65, 0x8b, 0x87, 0x04}; WebCryptoAlgorithm encrypt_algorithm( kWebCryptoAlgorithmIdAesCbc, std::make_unique<WebCryptoAesCbcParams>(iv)); WebVector<uint8_t> plaintext = SyncDecrypt(script_state, encrypt_algorithm, new_key->Key(), ciphertext); EXPECT_THAT(plaintext, ElementsAre(1, 2, 3)); } // HMAC-SHA256 uses HMAC key params. TEST(V8ScriptValueSerializerForModulesTest, RoundTripCryptoKeyHMAC) { V8TestingScope scope; ScriptState* script_state = scope.GetScriptState(); // Generate an HMAC-SHA256 key. 
WebCryptoAlgorithm hash(kWebCryptoAlgorithmIdSha256, nullptr); std::unique_ptr<WebCryptoAlgorithmParams> generate_key_params( new WebCryptoHmacKeyGenParams(hash, false, 0)); WebCryptoAlgorithm generate_key_algorithm(kWebCryptoAlgorithmIdHmac, std::move(generate_key_params)); CryptoKey* key = SyncGenerateKey(script_state, generate_key_algorithm, true, kWebCryptoKeyUsageSign | kWebCryptoKeyUsageVerify); // Round trip it and check the visible attributes. v8::Local<v8::Value> wrapper = ToV8(key, scope.GetScriptState()); v8::Local<v8::Value> result = RoundTripForModules(wrapper, scope); ASSERT_TRUE(V8CryptoKey::hasInstance(result, scope.GetIsolate())); CryptoKey* new_key = V8CryptoKey::ToImpl(result.As<v8::Object>()); EXPECT_EQ("secret", new_key->type()); EXPECT_TRUE(new_key->extractable()); EXPECT_EQ(kWebCryptoKeyUsageSign | kWebCryptoKeyUsageVerify, new_key->Key().Usages()); // Check that the keys have the same raw representation. WebVector<uint8_t> key_raw = SyncExportKey(script_state, kWebCryptoKeyFormatRaw, key->Key()); WebVector<uint8_t> new_key_raw = SyncExportKey(script_state, kWebCryptoKeyFormatRaw, new_key->Key()); EXPECT_THAT(new_key_raw, ElementsAreArray(key_raw)); // Check that one can verify a message signed by the other. Vector<uint8_t> message{1, 2, 3}; WebCryptoAlgorithm algorithm(kWebCryptoAlgorithmIdHmac, nullptr); WebVector<uint8_t> signature = SyncSign(script_state, algorithm, key->Key(), message); EXPECT_TRUE(SyncVerifySignature(script_state, algorithm, new_key->Key(), signature, message)); } TEST(V8ScriptValueSerializerForModulesTest, DecodeCryptoKeyHMAC) { V8TestingScope scope; ScriptState* script_state = scope.GetScriptState(); // Decode an HMAC-SHA256 key (non-extractable, verify only). scoped_refptr<SerializedScriptValue> input = SerializedValue( {0xff, 0x09, 0x3f, 0x00, 0x4b, 0x02, 0x40, 0x06, 0x10, 0x40, 0xd9, 0xbd, 0x0e, 0x84, 0x24, 0x3c, 0xb0, 0xbc, 0xee, 0x36, 0x61, 0xdc, 0xd0, 0xb0, 0xf5, 0x62, 0x09, 0xab, 0x93, 0x8c, 0x21, 0xaf, 0xb7, 0x66, 0xa9, 0xfc, 0xd2, 0xaa, 0xd8, 0xd4, 0x79, 0xf2, 0x55, 0x3a, 0xef, 0x46, 0x03, 0xec, 0x64, 0x2f, 0x68, 0xea, 0x9f, 0x9d, 0x1d, 0xd2, 0x42, 0xd0, 0x13, 0x6c, 0xe0, 0xe1, 0xed, 0x9c, 0x59, 0x46, 0x85, 0xaf, 0x41, 0xc4, 0x6a, 0x2d, 0x06, 0x7a}); v8::Local<v8::Value> result = V8ScriptValueDeserializerForModules(script_state, input).Deserialize(); ASSERT_TRUE(V8CryptoKey::hasInstance(result, scope.GetIsolate())); CryptoKey* new_key = V8CryptoKey::ToImpl(result.As<v8::Object>()); EXPECT_EQ("secret", new_key->type()); EXPECT_FALSE(new_key->extractable()); EXPECT_EQ(kWebCryptoKeyUsageVerify, new_key->Key().Usages()); // Check that it can successfully verify a signature. Vector<uint8_t> message{1, 2, 3}; Vector<uint8_t> signature{0x91, 0xc8, 0x54, 0xc3, 0x19, 0x4e, 0xc5, 0x6c, 0x2d, 0x18, 0x91, 0x88, 0xd0, 0x56, 0x4d, 0xb6, 0x46, 0xc8, 0xb2, 0xa4, 0x2e, 0x1f, 0x0d, 0xe2, 0xd6, 0x60, 0xf9, 0xee, 0xb7, 0xd4, 0x55, 0x12}; WebCryptoAlgorithm algorithm(kWebCryptoAlgorithmIdHmac, nullptr); EXPECT_TRUE(SyncVerifySignature(script_state, algorithm, new_key->Key(), signature, message)); } // RSA-PSS-SHA256 uses RSA hashed key params. TEST(V8ScriptValueSerializerForModulesTest, RoundTripCryptoKeyRSAHashed) { V8TestingScope scope; ScriptState* script_state = scope.GetScriptState(); // Generate an RSA-PSS-SHA256 key pair. 
WebCryptoAlgorithm hash(kWebCryptoAlgorithmIdSha256, nullptr); std::unique_ptr<WebCryptoAlgorithmParams> generate_key_params( new WebCryptoRsaHashedKeyGenParams(hash, 1024, Vector<uint8_t>{1, 0, 1})); WebCryptoAlgorithm generate_key_algorithm(kWebCryptoAlgorithmIdRsaPss, std::move(generate_key_params)); CryptoKey* public_key; CryptoKey* private_key; std::tie(public_key, private_key) = SyncGenerateKeyPair(script_state, generate_key_algorithm, true, kWebCryptoKeyUsageSign | kWebCryptoKeyUsageVerify); // Round trip the private key and check the visible attributes. v8::Local<v8::Value> wrapper = ToV8(private_key, scope.GetScriptState()); v8::Local<v8::Value> result = RoundTripForModules(wrapper, scope); ASSERT_TRUE(V8CryptoKey::hasInstance(result, scope.GetIsolate())); CryptoKey* new_private_key = V8CryptoKey::ToImpl(result.As<v8::Object>()); EXPECT_EQ("private", new_private_key->type()); EXPECT_TRUE(new_private_key->extractable()); EXPECT_EQ(kWebCryptoKeyUsageSign, new_private_key->Key().Usages()); // Check that the keys have the same PKCS8 representation. WebVector<uint8_t> key_raw = SyncExportKey(script_state, kWebCryptoKeyFormatPkcs8, private_key->Key()); WebVector<uint8_t> new_key_raw = SyncExportKey( script_state, kWebCryptoKeyFormatPkcs8, new_private_key->Key()); EXPECT_THAT(new_key_raw, ElementsAreArray(key_raw)); // Check that one can verify a message signed by the other. Vector<uint8_t> message{1, 2, 3}; WebCryptoAlgorithm algorithm(kWebCryptoAlgorithmIdRsaPss, std::make_unique<WebCryptoRsaPssParams>(16)); WebVector<uint8_t> signature = SyncSign(script_state, algorithm, new_private_key->Key(), message); EXPECT_TRUE(SyncVerifySignature(script_state, algorithm, public_key->Key(), signature, message)); } TEST(V8ScriptValueSerializerForModulesTest, DecodeCryptoKeyRSAHashed) { V8TestingScope scope; ScriptState* script_state = scope.GetScriptState(); // Decode an RSA-PSS-SHA256 public key (extractable, verify only). scoped_refptr<SerializedScriptValue> input = SerializedValue( {0xff, 0x09, 0x3f, 0x00, 0x4b, 0x04, 0x0d, 0x01, 0x80, 0x08, 0x03, 0x01, 0x00, 0x01, 0x06, 0x11, 0xa2, 0x01, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xae, 0xef, 0x7f, 0xee, 0x3a, 0x48, 0x48, 0xea, 0xce, 0x18, 0x0b, 0x86, 0x34, 0x6c, 0x1d, 0xc5, 0xe8, 0xea, 0xab, 0x33, 0xd0, 0x6f, 0x63, 0x82, 0x37, 0x18, 0x83, 0x01, 0x3d, 0x11, 0xe3, 0x03, 0x79, 0x2c, 0x0a, 0x79, 0xe6, 0xf5, 0x14, 0x73, 0x5f, 0x50, 0xa8, 0x17, 0x10, 0x58, 0x59, 0x20, 0x09, 0x54, 0x56, 0xe0, 0x86, 0x07, 0x5f, 0xab, 0x9c, 0x86, 0xb1, 0x80, 0xcb, 0x72, 0x5e, 0x55, 0x8b, 0x83, 0x98, 0xbf, 0xed, 0xbe, 0xdf, 0xdc, 0x6b, 0xff, 0xcf, 0x50, 0xee, 0xcc, 0x7c, 0xb4, 0x8c, 0x68, 0x75, 0x66, 0xf2, 0x21, 0x0d, 0xf5, 0x50, 0xdd, 0x06, 0x29, 0x57, 0xf7, 0x44, 0x42, 0x3d, 0xd9, 0x30, 0xb0, 0x8a, 0x5e, 0x8f, 0xea, 0xff, 0x45, 0xa0, 0x1d, 0x04, 0xbe, 0xc5, 0x82, 0xd3, 0x69, 0x4e, 0xcd, 0x14, 0x7b, 0xf5, 0x00, 0x3c, 0xb1, 0x19, 0x24, 0xae, 0x8d, 0x22, 0xb5, 0x02, 0x03, 0x01, 0x00, 0x01}); v8::Local<v8::Value> result = V8ScriptValueDeserializerForModules(script_state, input).Deserialize(); ASSERT_TRUE(V8CryptoKey::hasInstance(result, scope.GetIsolate())); CryptoKey* new_public_key = V8CryptoKey::ToImpl(result.As<v8::Object>()); EXPECT_EQ("public", new_public_key->type()); EXPECT_TRUE(new_public_key->extractable()); EXPECT_EQ(kWebCryptoKeyUsageVerify, new_public_key->Key().Usages()); // Check that it can successfully verify a signature. 
Vector<uint8_t> message{1, 2, 3}; Vector<uint8_t> signature{ 0x9b, 0x61, 0xc8, 0x4b, 0x1c, 0xe5, 0x24, 0xe6, 0x54, 0x73, 0x1a, 0xb5, 0xe3, 0x22, 0xc7, 0xd1, 0x36, 0x3d, 0x85, 0x99, 0x26, 0x45, 0xcc, 0x54, 0x98, 0x1f, 0xf3, 0x9d, 0x32, 0x87, 0xdc, 0xbb, 0xb6, 0x3a, 0xa4, 0x6d, 0xd4, 0xb5, 0x52, 0x83, 0x24, 0x02, 0xc7, 0x62, 0x1f, 0xb7, 0x27, 0x2b, 0x5a, 0x54, 0x59, 0x17, 0x81, 0x8a, 0xf5, 0x0c, 0x17, 0x01, 0x45, 0x3f, 0x14, 0xf2, 0x3c, 0x27, 0x4d, 0xfa, 0xc0, 0x0a, 0x82, 0x4b, 0xb2, 0xf4, 0x7b, 0x14, 0x1b, 0xd8, 0xbc, 0xe9, 0x2e, 0xd4, 0x55, 0x27, 0x62, 0x83, 0x11, 0xed, 0xc2, 0x81, 0x7d, 0xa9, 0x4f, 0xe0, 0xef, 0x0e, 0xa5, 0xa5, 0xc6, 0x40, 0x46, 0xbf, 0x90, 0x19, 0xfc, 0xc8, 0x51, 0x0e, 0x0f, 0x62, 0xeb, 0x17, 0x68, 0x1f, 0xbd, 0xfa, 0xf7, 0xd6, 0x1f, 0xa4, 0x7c, 0x9e, 0x9e, 0xb1, 0x96, 0x8f, 0xe6, 0x5e, 0x89, 0x99}; WebCryptoAlgorithm algorithm(kWebCryptoAlgorithmIdRsaPss, std::make_unique<WebCryptoRsaPssParams>(16)); EXPECT_TRUE(SyncVerifySignature(script_state, algorithm, new_public_key->Key(), signature, message)); } // ECDSA uses EC key params. TEST(V8ScriptValueSerializerForModulesTest, RoundTripCryptoKeyEC) { V8TestingScope scope; ScriptState* script_state = scope.GetScriptState(); // Generate an ECDSA key pair with the NIST P-256 curve. std::unique_ptr<WebCryptoAlgorithmParams> generate_key_params( new WebCryptoEcKeyGenParams(kWebCryptoNamedCurveP256)); WebCryptoAlgorithm generate_key_algorithm(kWebCryptoAlgorithmIdEcdsa, std::move(generate_key_params)); CryptoKey* public_key; CryptoKey* private_key; std::tie(public_key, private_key) = SyncGenerateKeyPair(script_state, generate_key_algorithm, true, kWebCryptoKeyUsageSign | kWebCryptoKeyUsageVerify); // Round trip the private key and check the visible attributes. v8::Local<v8::Value> wrapper = ToV8(private_key, scope.GetScriptState()); v8::Local<v8::Value> result = RoundTripForModules(wrapper, scope); ASSERT_TRUE(V8CryptoKey::hasInstance(result, scope.GetIsolate())); CryptoKey* new_private_key = V8CryptoKey::ToImpl(result.As<v8::Object>()); EXPECT_EQ("private", new_private_key->type()); EXPECT_TRUE(new_private_key->extractable()); EXPECT_EQ(kWebCryptoKeyUsageSign, new_private_key->Key().Usages()); // Check that the keys have the same PKCS8 representation. WebVector<uint8_t> key_raw = SyncExportKey(script_state, kWebCryptoKeyFormatPkcs8, private_key->Key()); WebVector<uint8_t> new_key_raw = SyncExportKey( script_state, kWebCryptoKeyFormatPkcs8, new_private_key->Key()); EXPECT_THAT(new_key_raw, ElementsAreArray(key_raw)); // Check that one can verify a message signed by the other. WebCryptoAlgorithm hash(kWebCryptoAlgorithmIdSha256, nullptr); Vector<uint8_t> message{1, 2, 3}; WebCryptoAlgorithm algorithm(kWebCryptoAlgorithmIdEcdsa, std::make_unique<WebCryptoEcdsaParams>(hash)); WebVector<uint8_t> signature = SyncSign(script_state, algorithm, new_private_key->Key(), message); EXPECT_TRUE(SyncVerifySignature(script_state, algorithm, public_key->Key(), signature, message)); } TEST(V8ScriptValueSerializerForModulesTest, DecodeCryptoKeyEC) { V8TestingScope scope; ScriptState* script_state = scope.GetScriptState(); // Decode an ECDSA public key with the NIST P-256 curve (extractable). 
scoped_refptr<SerializedScriptValue> input = SerializedValue( {0xff, 0x09, 0x3f, 0x00, 0x4b, 0x05, 0x0e, 0x01, 0x01, 0x11, 0x5b, 0x30, 0x59, 0x30, 0x13, 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x02, 0x01, 0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07, 0x03, 0x42, 0x00, 0x04, 0xfe, 0x16, 0x70, 0x29, 0x07, 0x2c, 0x11, 0xbf, 0xcf, 0xb7, 0x9d, 0x54, 0x35, 0x3d, 0xc7, 0x85, 0x66, 0x26, 0xa5, 0xda, 0x69, 0x4c, 0x07, 0xd5, 0x74, 0xcb, 0x93, 0xf4, 0xdb, 0x7e, 0x38, 0x3c, 0xa8, 0x98, 0x2a, 0x6f, 0xb2, 0xf5, 0x48, 0x73, 0x2f, 0x59, 0x21, 0xa0, 0xa9, 0xf5, 0x6e, 0x37, 0x0c, 0xfc, 0x5b, 0x68, 0x0e, 0x19, 0x5b, 0xd3, 0x4f, 0xb4, 0x0e, 0x1c, 0x31, 0x5a, 0xaa, 0x2d}); v8::Local<v8::Value> result = V8ScriptValueDeserializerForModules(script_state, input).Deserialize(); ASSERT_TRUE(V8CryptoKey::hasInstance(result, scope.GetIsolate())); CryptoKey* new_public_key = V8CryptoKey::ToImpl(result.As<v8::Object>()); EXPECT_EQ("public", new_public_key->type()); EXPECT_TRUE(new_public_key->extractable()); EXPECT_EQ(kWebCryptoKeyUsageVerify, new_public_key->Key().Usages()); // Check that it can successfully verify a signature. Vector<uint8_t> message{1, 2, 3}; Vector<uint8_t> signature{ 0xee, 0x63, 0xa2, 0xa3, 0x87, 0x6c, 0x9f, 0xc5, 0x64, 0x12, 0x87, 0x0d, 0xc7, 0xff, 0x3c, 0xd2, 0x6c, 0x2b, 0x2c, 0x0b, 0x2b, 0x8d, 0x3c, 0xe0, 0x3f, 0xd3, 0xfc, 0x28, 0xf0, 0xa1, 0x22, 0x69, 0x0a, 0x33, 0x4d, 0x48, 0x97, 0xad, 0x67, 0xa9, 0x6e, 0x24, 0xe7, 0x31, 0x09, 0xdb, 0xa8, 0x92, 0x48, 0x70, 0xa6, 0x6c, 0x46, 0x4d, 0x0b, 0x83, 0x27, 0x37, 0x69, 0x4d, 0x32, 0x63, 0x1e, 0x82}; WebCryptoAlgorithm hash(kWebCryptoAlgorithmIdSha256, nullptr); WebCryptoAlgorithm algorithm(kWebCryptoAlgorithmIdEcdsa, std::make_unique<WebCryptoEcdsaParams>(hash)); EXPECT_TRUE(SyncVerifySignature(script_state, algorithm, new_public_key->Key(), signature, message)); } TEST(V8ScriptValueSerializerForModulesTest, RoundTripCryptoKeyNoParams) { V8TestingScope scope; ScriptState* script_state = scope.GetScriptState(); // Import some data into a PBKDF2 state. WebCryptoAlgorithm import_key_algorithm(kWebCryptoAlgorithmIdPbkdf2, nullptr); CryptoKey* key = SyncImportKey(script_state, kWebCryptoKeyFormatRaw, Vector<uint8_t>{1, 2, 3}, import_key_algorithm, false, kWebCryptoKeyUsageDeriveBits); // Round trip the key and check the visible attributes. v8::Local<v8::Value> wrapper = ToV8(key, scope.GetScriptState()); v8::Local<v8::Value> result = RoundTripForModules(wrapper, scope); ASSERT_TRUE(V8CryptoKey::hasInstance(result, scope.GetIsolate())); CryptoKey* new_key = V8CryptoKey::ToImpl(result.As<v8::Object>()); EXPECT_EQ("secret", new_key->type()); EXPECT_FALSE(new_key->extractable()); EXPECT_EQ(kWebCryptoKeyUsageDeriveBits, new_key->Key().Usages()); // Check that the keys derive the same bits. WebCryptoAlgorithm hash(kWebCryptoAlgorithmIdSha256, nullptr); WebVector<uint8_t> salt(static_cast<size_t>(16)); std::unique_ptr<WebCryptoAlgorithmParams> params( new WebCryptoPbkdf2Params(hash, salt, 1)); WebCryptoAlgorithm algorithm(kWebCryptoAlgorithmIdPbkdf2, std::move(params)); WebVector<uint8_t> bits_raw = SyncDeriveBits(script_state, algorithm, key->Key(), 16); WebVector<uint8_t> new_bits_raw = SyncDeriveBits(script_state, algorithm, new_key->Key(), 16); EXPECT_EQ(2u, bits_raw.size()); EXPECT_THAT(new_bits_raw, ElementsAreArray(bits_raw)); } TEST(V8ScriptValueSerializerForModulesTest, DecodeCryptoKeyNoParams) { V8TestingScope scope; ScriptState* script_state = scope.GetScriptState(); // Decode PBKDF2 state seeded with {1,2,3}. 
scoped_refptr<SerializedScriptValue> input = SerializedValue({0xff, 0x09, 0x3f, 0x00, 0x4b, 0x06, 0x11, 0xa0, 0x02, 0x03, 0x01, 0x02, 0x03, 0x00}); v8::Local<v8::Value> result = V8ScriptValueDeserializerForModules(script_state, input).Deserialize(); ASSERT_TRUE(V8CryptoKey::hasInstance(result, scope.GetIsolate())); CryptoKey* new_key = V8CryptoKey::ToImpl(result.As<v8::Object>()); EXPECT_EQ("secret", new_key->type()); EXPECT_FALSE(new_key->extractable()); EXPECT_EQ(kWebCryptoKeyUsageDeriveKey | kWebCryptoKeyUsageDeriveBits, new_key->Key().Usages()); // Check that it derives the right bits. WebCryptoAlgorithm hash(kWebCryptoAlgorithmIdSha256, nullptr); WebVector<uint8_t> salt(static_cast<size_t>(16)); std::unique_ptr<WebCryptoAlgorithmParams> params( new WebCryptoPbkdf2Params(hash, salt, 3)); WebCryptoAlgorithm algorithm(kWebCryptoAlgorithmIdPbkdf2, std::move(params)); WebVector<uint8_t> bits_raw = SyncDeriveBits(script_state, algorithm, new_key->Key(), 32); EXPECT_THAT(bits_raw, ElementsAre(0xd8, 0x0e, 0x2f, 0x69)); } TEST(V8ScriptValueSerializerForModulesTest, DecodeCryptoKeyInvalid) { V8TestingScope scope; ScriptState* script_state = scope.GetScriptState(); // Invalid algorithm ID. EXPECT_TRUE(V8ScriptValueDeserializerForModules( script_state, SerializedValue({0xff, 0x09, 0x3f, 0x00, 0x4b, 0x06, 0x7f, 0xa0, 0x02, 0x03, 0x01, 0x02, 0x03, 0x00})) .Deserialize() ->IsNull()); // Algorithm ID / params type mismatch (AES params, RSA-OEAP ID). EXPECT_TRUE( V8ScriptValueDeserializerForModules( script_state, SerializedValue({0xff, 0x09, 0x3f, 0x00, 0x4b, 0x01, 0x0a, 0x10, 0x04, 0x10, 0x7e, 0x25, 0xb2, 0xe8, 0x62, 0x3e, 0xd7, 0x83, 0x70, 0xa2, 0xae, 0x98, 0x79, 0x1b, 0xc5, 0xf7})) .Deserialize() ->IsNull()); // Invalid asymmetric key type. EXPECT_TRUE( V8ScriptValueDeserializerForModules( script_state, SerializedValue( {0xff, 0x09, 0x3f, 0x00, 0x4b, 0x05, 0x0e, 0x7f, 0x01, 0x11, 0x5b, 0x30, 0x59, 0x30, 0x13, 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x02, 0x01, 0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07, 0x03, 0x42, 0x00, 0x04, 0xfe, 0x16, 0x70, 0x29, 0x07, 0x2c, 0x11, 0xbf, 0xcf, 0xb7, 0x9d, 0x54, 0x35, 0x3d, 0xc7, 0x85, 0x66, 0x26, 0xa5, 0xda, 0x69, 0x4c, 0x07, 0xd5, 0x74, 0xcb, 0x93, 0xf4, 0xdb, 0x7e, 0x38, 0x3c, 0xa8, 0x98, 0x2a, 0x6f, 0xb2, 0xf5, 0x48, 0x73, 0x2f, 0x59, 0x21, 0xa0, 0xa9, 0xf5, 0x6e, 0x37, 0x0c, 0xfc, 0x5b, 0x68, 0x0e, 0x19, 0x5b, 0xd3, 0x4f, 0xb4, 0x0e, 0x1c, 0x31, 0x5a, 0xaa, 0x2d})) .Deserialize() ->IsNull()); // Invalid named curve. EXPECT_TRUE( V8ScriptValueDeserializerForModules( script_state, SerializedValue( {0xff, 0x09, 0x3f, 0x00, 0x4b, 0x05, 0x0e, 0x01, 0x7f, 0x11, 0x5b, 0x30, 0x59, 0x30, 0x13, 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x02, 0x01, 0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07, 0x03, 0x42, 0x00, 0x04, 0xfe, 0x16, 0x70, 0x29, 0x07, 0x2c, 0x11, 0xbf, 0xcf, 0xb7, 0x9d, 0x54, 0x35, 0x3d, 0xc7, 0x85, 0x66, 0x26, 0xa5, 0xda, 0x69, 0x4c, 0x07, 0xd5, 0x74, 0xcb, 0x93, 0xf4, 0xdb, 0x7e, 0x38, 0x3c, 0xa8, 0x98, 0x2a, 0x6f, 0xb2, 0xf5, 0x48, 0x73, 0x2f, 0x59, 0x21, 0xa0, 0xa9, 0xf5, 0x6e, 0x37, 0x0c, 0xfc, 0x5b, 0x68, 0x0e, 0x19, 0x5b, 0xd3, 0x4f, 0xb4, 0x0e, 0x1c, 0x31, 0x5a, 0xaa, 0x2d})) .Deserialize() ->IsNull()); // Unknown usage. 
EXPECT_TRUE(V8ScriptValueDeserializerForModules( script_state, SerializedValue({0xff, 0x09, 0x3f, 0x00, 0x4b, 0x06, 0x11, 0x80, 0x40, 0x03, 0x01, 0x02, 0x03, 0x00})) .Deserialize() ->IsNull()); // AES key length (16384) that would overflow unsigned short after multiply by // 8 (to convert from bytes to bits). EXPECT_TRUE(V8ScriptValueDeserializerForModules( script_state, SerializedValue({0xff, 0x09, 0x3f, 0x00, 0x4b, 0x01, 0x01, 0x80, 0x80, 0x02, 0x04, 0x10, 0x7e, 0x25, 0xb2, 0xe8, 0x62, 0x3e, 0xd7, 0x83, 0x70, 0xa2, 0xae, 0x98, 0x79, 0x1b, 0xc5, 0xf7})) .Deserialize() ->IsNull()); // HMAC length (1073741824) that would overflow 32-bit unsigned after multiply // by 8 (to convert from bytes to bits). EXPECT_TRUE( V8ScriptValueDeserializerForModules( script_state, SerializedValue({0xff, 0x09, 0x3f, 0x00, 0x4b, 0x02, 0x80, 0x80, 0x80, 0x80, 0x04, 0x06, 0x10, 0x40, 0xd9, 0xbd, 0x0e, 0x84, 0x24, 0x3c, 0xb0, 0xbc, 0xee, 0x36, 0x61, 0xdc, 0xd0, 0xb0, 0xf5, 0x62, 0x09, 0xab, 0x93, 0x8c, 0x21, 0xaf, 0xb7, 0x66, 0xa9, 0xfc, 0xd2, 0xaa, 0xd8, 0xd4, 0x79, 0xf2, 0x55, 0x3a, 0xef, 0x46, 0x03, 0xec, 0x64, 0x2f, 0x68, 0xea, 0x9f, 0x9d, 0x1d, 0xd2, 0x42, 0xd0, 0x13, 0x6c, 0xe0, 0xe1, 0xed, 0x9c, 0x59, 0x46, 0x85, 0xaf, 0x41, 0xc4, 0x6a, 0x2d, 0x06, 0x7a})) .Deserialize() ->IsNull()); // Input ends before end of declared public exponent size. EXPECT_TRUE( V8ScriptValueDeserializerForModules( script_state, SerializedValue({0xff, 0x09, 0x3f, 0x00, 0x4b, 0x04, 0x0d, 0x01, 0x80, 0x08, 0x03, 0x01})) .Deserialize() ->IsNull()); } TEST(V8ScriptValueSerializerForModulesTest, RoundTripDOMFileSystem) { V8TestingScope scope; DOMFileSystem* fs = DOMFileSystem::Create( scope.GetExecutionContext(), "http_example.com_0:Persistent", kFileSystemTypePersistent, KURL("filesystem:http://example.com/persistent/")); // At time of writing, this can only happen for filesystems from PPAPI. fs->MakeClonable(); v8::Local<v8::Value> wrapper = ToV8(fs, scope.GetScriptState()); v8::Local<v8::Value> result = RoundTripForModules(wrapper, scope); ASSERT_FALSE(result.IsEmpty()); ASSERT_TRUE(V8DOMFileSystem::hasInstance(result, scope.GetIsolate())); DOMFileSystem* new_fs = V8DOMFileSystem::ToImpl(result.As<v8::Object>()); EXPECT_EQ("http_example.com_0:Persistent", new_fs->name()); EXPECT_EQ(kFileSystemTypePersistent, new_fs->GetType()); EXPECT_EQ("filesystem:http://example.com/persistent/", new_fs->RootURL().GetString()); } TEST(V8ScriptValueSerializerForModulesTest, RoundTripDOMFileSystemNotClonable) { V8TestingScope scope; ExceptionState exception_state(scope.GetIsolate(), ExceptionState::kExecutionContext, "Window", "postMessage"); DOMFileSystem* fs = DOMFileSystem::Create( scope.GetExecutionContext(), "http_example.com_0:Persistent", kFileSystemTypePersistent, KURL("filesystem:http://example.com/persistent/0/")); ASSERT_FALSE(fs->Clonable()); v8::Local<v8::Value> wrapper = ToV8(fs, scope.GetScriptState()); EXPECT_FALSE(V8ScriptValueSerializer(scope.GetScriptState()) .Serialize(wrapper, exception_state)); EXPECT_TRUE(HadDOMExceptionInModulesTest( "DataCloneError", scope.GetScriptState(), exception_state)); } TEST(V8ScriptValueSerializerForModulesTest, DecodeDOMFileSystem) { V8TestingScope scope; // This is encoded data generated from Chromium (around M56). 
ScriptState* script_state = scope.GetScriptState(); scoped_refptr<SerializedScriptValue> input = SerializedValue( {0xff, 0x09, 0x3f, 0x00, 0x64, 0x01, 0x1d, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x5f, 0x30, 0x3a, 0x50, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x3a, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x2f}); // Decode test. v8::Local<v8::Value> result = V8ScriptValueDeserializerForModules(script_state, input).Deserialize(); ASSERT_TRUE(V8DOMFileSystem::hasInstance(result, scope.GetIsolate())); DOMFileSystem* new_fs = V8DOMFileSystem::ToImpl(result.As<v8::Object>()); EXPECT_EQ("http_example.com_0:Persistent", new_fs->name()); EXPECT_EQ(kFileSystemTypePersistent, new_fs->GetType()); EXPECT_EQ("filesystem:http://example.com/persistent/", new_fs->RootURL().GetString()); } TEST(V8ScriptValueSerializerForModulesTest, DecodeInvalidDOMFileSystem) { V8TestingScope scope; ScriptState* script_state = scope.GetScriptState(); // Filesystem type out of range. EXPECT_TRUE( V8ScriptValueDeserializerForModules( script_state, SerializedValue({0xff, 0x09, 0x3f, 0x00, 0x64, 0x04, 0x1d, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x5f, 0x30, 0x3a, 0x50, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x3a, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x2f })) .Deserialize() ->IsNull()); } } // namespace } // namespace blink
null
null
null
null
27,897
38,115
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
203,110
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#include "../evlist.h" #include "../cache.h" #include "../evsel.h" #include "../sort.h" #include "../hist.h" #include "../helpline.h" #include "gtk.h" #include <signal.h> void perf_gtk__signal(int sig) { perf_gtk__exit(false); psignal(sig, "perf"); } void perf_gtk__resize_window(GtkWidget *window) { GdkRectangle rect; GdkScreen *screen; int monitor; int height; int width; screen = gtk_widget_get_screen(window); monitor = gdk_screen_get_monitor_at_window(screen, window->window); gdk_screen_get_monitor_geometry(screen, monitor, &rect); width = rect.width * 3 / 4; height = rect.height * 3 / 4; gtk_window_resize(GTK_WINDOW(window), width, height); } const char *perf_gtk__get_percent_color(double percent) { if (percent >= MIN_RED) return "<span fgcolor='red'>"; if (percent >= MIN_GREEN) return "<span fgcolor='dark green'>"; return NULL; } #ifdef HAVE_GTK_INFO_BAR_SUPPORT GtkWidget *perf_gtk__setup_info_bar(void) { GtkWidget *info_bar; GtkWidget *label; GtkWidget *content_area; info_bar = gtk_info_bar_new(); gtk_widget_set_no_show_all(info_bar, TRUE); label = gtk_label_new(""); gtk_widget_show(label); content_area = gtk_info_bar_get_content_area(GTK_INFO_BAR(info_bar)); gtk_container_add(GTK_CONTAINER(content_area), label); gtk_info_bar_add_button(GTK_INFO_BAR(info_bar), GTK_STOCK_OK, GTK_RESPONSE_OK); g_signal_connect(info_bar, "response", G_CALLBACK(gtk_widget_hide), NULL); pgctx->info_bar = info_bar; pgctx->message_label = label; return info_bar; } #endif GtkWidget *perf_gtk__setup_statusbar(void) { GtkWidget *stbar; unsigned ctxid; stbar = gtk_statusbar_new(); ctxid = gtk_statusbar_get_context_id(GTK_STATUSBAR(stbar), "perf report"); pgctx->statbar = stbar; pgctx->statbar_ctx_id = ctxid; return stbar; }
null
null
null
null
111,457
65,715
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
65,715
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CHROME_BROWSER_UI_APPS_DIRECTORY_ACCESS_CONFIRMATION_DIALOG_H_
#define CHROME_BROWSER_UI_APPS_DIRECTORY_ACCESS_CONFIRMATION_DIALOG_H_

#include "base/callback_forward.h"
#include "base/strings/string16.h"

namespace content {
class WebContents;
}

void CreateDirectoryAccessConfirmationDialog(bool writable,
                                             const base::string16& app_name,
                                             content::WebContents* web_contents,
                                             const base::Closure& on_accept,
                                             const base::Closure& on_cancel);

#endif  // CHROME_BROWSER_UI_APPS_DIRECTORY_ACCESS_CONFIRMATION_DIALOG_H_
null
null
null
null
62,578
1,308
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
154,365
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/* * JNI utility functions * * Copyright (c) 2015-2016 Matthieu Bouron <matthieu.bouron stupeflix.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <jni.h> #include <pthread.h> #include <stdlib.h> #include "libavutil/bprint.h" #include "libavutil/log.h" #include "config.h" #include "jni.h" #include "ffjni.h" static JavaVM *java_vm; static pthread_key_t current_env; static pthread_once_t once = PTHREAD_ONCE_INIT; static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; static void jni_detach_env(void *data) { if (java_vm) { (*java_vm)->DetachCurrentThread(java_vm); } } static void jni_create_pthread_key(void) { pthread_key_create(&current_env, jni_detach_env); } JNIEnv *ff_jni_get_env(void *log_ctx) { int ret = 0; JNIEnv *env = NULL; pthread_mutex_lock(&lock); if (java_vm == NULL) { java_vm = av_jni_get_java_vm(log_ctx); } if (!java_vm) { av_log(log_ctx, AV_LOG_ERROR, "No Java virtual machine has been registered\n"); goto done; } pthread_once(&once, jni_create_pthread_key); if ((env = pthread_getspecific(current_env)) != NULL) { goto done; } ret = (*java_vm)->GetEnv(java_vm, (void **)&env, JNI_VERSION_1_6); switch(ret) { case JNI_EDETACHED: if ((*java_vm)->AttachCurrentThread(java_vm, &env, NULL) != 0) { av_log(log_ctx, AV_LOG_ERROR, "Failed to attach the JNI environment to the current thread\n"); env = NULL; } else { pthread_setspecific(current_env, env); } break; case JNI_OK: break; case JNI_EVERSION: av_log(log_ctx, AV_LOG_ERROR, "The specified JNI version is not supported\n"); break; default: av_log(log_ctx, AV_LOG_ERROR, "Failed to get the JNI environment attached to this thread\n"); break; } done: pthread_mutex_unlock(&lock); return env; } char *ff_jni_jstring_to_utf_chars(JNIEnv *env, jstring string, void *log_ctx) { char *ret = NULL; const char *utf_chars = NULL; jboolean copy = 0; if (!string) { return NULL; } utf_chars = (*env)->GetStringUTFChars(env, string, &copy); if ((*env)->ExceptionCheck(env)) { (*env)->ExceptionClear(env); av_log(log_ctx, AV_LOG_ERROR, "String.getStringUTFChars() threw an exception\n"); return NULL; } ret = av_strdup(utf_chars); (*env)->ReleaseStringUTFChars(env, string, utf_chars); if ((*env)->ExceptionCheck(env)) { (*env)->ExceptionClear(env); av_log(log_ctx, AV_LOG_ERROR, "String.releaseStringUTFChars() threw an exception\n"); return NULL; } return ret; } jstring ff_jni_utf_chars_to_jstring(JNIEnv *env, const char *utf_chars, void *log_ctx) { jstring ret; ret = (*env)->NewStringUTF(env, utf_chars); if ((*env)->ExceptionCheck(env)) { (*env)->ExceptionClear(env); av_log(log_ctx, AV_LOG_ERROR, "NewStringUTF() threw an exception\n"); return NULL; } return ret; } int ff_jni_exception_get_summary(JNIEnv *env, jthrowable exception, char **error, void *log_ctx) { int ret = 0; AVBPrint bp; char *name = NULL; char *message = NULL; jclass class_class = NULL; jmethodID 
get_name_id = NULL; jclass exception_class = NULL; jmethodID get_message_id = NULL; jstring string = NULL; av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC); exception_class = (*env)->GetObjectClass(env, exception); if ((*env)->ExceptionCheck(env)) { (*env)->ExceptionClear(env); av_log(log_ctx, AV_LOG_ERROR, "Could not find Throwable class\n"); ret = AVERROR_EXTERNAL; goto done; } class_class = (*env)->GetObjectClass(env, exception_class); if ((*env)->ExceptionCheck(env)) { (*env)->ExceptionClear(env); av_log(log_ctx, AV_LOG_ERROR, "Could not find Throwable class's class\n"); ret = AVERROR_EXTERNAL; goto done; } get_name_id = (*env)->GetMethodID(env, class_class, "getName", "()Ljava/lang/String;"); if ((*env)->ExceptionCheck(env)) { (*env)->ExceptionClear(env); av_log(log_ctx, AV_LOG_ERROR, "Could not find method Class.getName()\n"); ret = AVERROR_EXTERNAL; goto done; } string = (*env)->CallObjectMethod(env, exception_class, get_name_id); if ((*env)->ExceptionCheck(env)) { (*env)->ExceptionClear(env); av_log(log_ctx, AV_LOG_ERROR, "Class.getName() threw an exception\n"); ret = AVERROR_EXTERNAL; goto done; } if (string) { name = ff_jni_jstring_to_utf_chars(env, string, log_ctx); (*env)->DeleteLocalRef(env, string); string = NULL; } get_message_id = (*env)->GetMethodID(env, exception_class, "getMessage", "()Ljava/lang/String;"); if ((*env)->ExceptionCheck(env)) { (*env)->ExceptionClear(env); av_log(log_ctx, AV_LOG_ERROR, "Could not find method java/lang/Throwable.getMessage()\n"); ret = AVERROR_EXTERNAL; goto done; } string = (*env)->CallObjectMethod(env, exception, get_message_id); if ((*env)->ExceptionCheck(env)) { (*env)->ExceptionClear(env); av_log(log_ctx, AV_LOG_ERROR, "Throwable.getMessage() threw an exception\n"); ret = AVERROR_EXTERNAL; goto done; } if (string) { message = ff_jni_jstring_to_utf_chars(env, string, log_ctx); (*env)->DeleteLocalRef(env, string); string = NULL; } if (name && message) { av_bprintf(&bp, "%s: %s", name, message); } else if (name && !message) { av_bprintf(&bp, "%s occurred", name); } else if (!name && message) { av_bprintf(&bp, "Exception: %s", message); } else { av_log(log_ctx, AV_LOG_WARNING, "Could not retrieve exception name and message\n"); av_bprintf(&bp, "Exception occurred"); } ret = av_bprint_finalize(&bp, error); done: av_free(name); av_free(message); if (class_class) { (*env)->DeleteLocalRef(env, class_class); } if (exception_class) { (*env)->DeleteLocalRef(env, exception_class); } if (string) { (*env)->DeleteLocalRef(env, string); } return ret; } int ff_jni_exception_check(JNIEnv *env, int log, void *log_ctx) { int ret; jthrowable exception; char *message = NULL; if (!(*(env))->ExceptionCheck((env))) { return 0; } if (!log) { (*(env))->ExceptionClear((env)); return -1; } exception = (*env)->ExceptionOccurred(env); (*(env))->ExceptionClear((env)); if ((ret = ff_jni_exception_get_summary(env, exception, &message, log_ctx)) < 0) { (*env)->DeleteLocalRef(env, exception); return ret; } (*env)->DeleteLocalRef(env, exception); av_log(log_ctx, AV_LOG_ERROR, "%s\n", message); av_free(message); return -1; } int ff_jni_init_jfields(JNIEnv *env, void *jfields, const struct FFJniField *jfields_mapping, int global, void *log_ctx) { int i, ret = 0; jclass last_clazz = NULL; for (i = 0; jfields_mapping[i].name; i++) { int mandatory = jfields_mapping[i].mandatory; enum FFJniFieldType type = jfields_mapping[i].type; if (type == FF_JNI_CLASS) { jclass clazz; last_clazz = NULL; clazz = (*env)->FindClass(env, jfields_mapping[i].name); if ((ret = 
ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) { goto done; } last_clazz = *(jclass*)((uint8_t*)jfields + jfields_mapping[i].offset) = global ? (*env)->NewGlobalRef(env, clazz) : clazz; if (global) { (*env)->DeleteLocalRef(env, clazz); } } else { if (!last_clazz) { ret = AVERROR_EXTERNAL; break; } switch(type) { case FF_JNI_FIELD: { jfieldID field_id = (*env)->GetFieldID(env, last_clazz, jfields_mapping[i].method, jfields_mapping[i].signature); if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) { goto done; } *(jfieldID*)((uint8_t*)jfields + jfields_mapping[i].offset) = field_id; break; } case FF_JNI_STATIC_FIELD: { jfieldID field_id = (*env)->GetStaticFieldID(env, last_clazz, jfields_mapping[i].method, jfields_mapping[i].signature); if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) { goto done; } *(jfieldID*)((uint8_t*)jfields + jfields_mapping[i].offset) = field_id; break; } case FF_JNI_METHOD: { jmethodID method_id = (*env)->GetMethodID(env, last_clazz, jfields_mapping[i].method, jfields_mapping[i].signature); if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) { goto done; } *(jmethodID*)((uint8_t*)jfields + jfields_mapping[i].offset) = method_id; break; } case FF_JNI_STATIC_METHOD: { jmethodID method_id = (*env)->GetStaticMethodID(env, last_clazz, jfields_mapping[i].method, jfields_mapping[i].signature); if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) { goto done; } *(jmethodID*)((uint8_t*)jfields + jfields_mapping[i].offset) = method_id; break; } default: av_log(log_ctx, AV_LOG_ERROR, "Unknown JNI field type\n"); ret = AVERROR(EINVAL); goto done; } ret = 0; } } done: if (ret < 0) { /* reset jfields in case of failure so it does not leak references */ ff_jni_reset_jfields(env, jfields, jfields_mapping, global, log_ctx); } return ret; } int ff_jni_reset_jfields(JNIEnv *env, void *jfields, const struct FFJniField *jfields_mapping, int global, void *log_ctx) { int i; for (i = 0; jfields_mapping[i].name; i++) { enum FFJniFieldType type = jfields_mapping[i].type; switch(type) { case FF_JNI_CLASS: { jclass clazz = *(jclass*)((uint8_t*)jfields + jfields_mapping[i].offset); if (!clazz) continue; if (global) { (*env)->DeleteGlobalRef(env, clazz); } else { (*env)->DeleteLocalRef(env, clazz); } *(jclass*)((uint8_t*)jfields + jfields_mapping[i].offset) = NULL; break; } case FF_JNI_FIELD: { *(jfieldID*)((uint8_t*)jfields + jfields_mapping[i].offset) = NULL; break; } case FF_JNI_STATIC_FIELD: { *(jfieldID*)((uint8_t*)jfields + jfields_mapping[i].offset) = NULL; break; } case FF_JNI_METHOD: { *(jmethodID*)((uint8_t*)jfields + jfields_mapping[i].offset) = NULL; break; } case FF_JNI_STATIC_METHOD: { *(jmethodID*)((uint8_t*)jfields + jfields_mapping[i].offset) = NULL; break; } default: av_log(log_ctx, AV_LOG_ERROR, "Unknown JNI field type\n"); } } return 0; }
null
null
null
null
70,420
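A usage note on the preceding record: FFmpeg's ffjni.c helpers wrap the repetitive JNI error-handling dance (ExceptionCheck, ExceptionClear, logging) behind ff_jni_get_env() and ff_jni_exception_check(). The sketch below is not FFmpeg code; it only illustrates how those helpers, whose signatures appear in the file above, are typically combined with plain JNI calls. The function name get_os_name and the choice of the os.name system property are assumptions made for the example.

#include <jni.h>
#include "ffjni.h"

static char *get_os_name(void *log_ctx)
{
    char *result = NULL;
    jclass system_class = NULL;
    jmethodID get_property = NULL;
    jstring key = NULL;
    jstring value = NULL;

    JNIEnv *env = ff_jni_get_env(log_ctx);  /* attaches this thread on demand */
    if (!env)
        return NULL;

    system_class = (*env)->FindClass(env, "java/lang/System");
    if (ff_jni_exception_check(env, 1, log_ctx) < 0 || !system_class)
        return NULL;

    get_property = (*env)->GetStaticMethodID(env, system_class, "getProperty",
                                             "(Ljava/lang/String;)Ljava/lang/String;");
    if (ff_jni_exception_check(env, 1, log_ctx) >= 0 && get_property) {
        key = ff_jni_utf_chars_to_jstring(env, "os.name", log_ctx);
        if (key) {
            value = (*env)->CallStaticObjectMethod(env, system_class, get_property, key);
            if (ff_jni_exception_check(env, 1, log_ctx) >= 0 && value)
                result = ff_jni_jstring_to_utf_chars(env, value, log_ctx);  /* av_strdup()ed */
        }
    }

    if (key)
        (*env)->DeleteLocalRef(env, key);
    if (value)
        (*env)->DeleteLocalRef(env, value);
    (*env)->DeleteLocalRef(env, system_class);
    return result;  /* caller releases with av_free() */
}

The same check-after-every-call pattern is what ff_jni_init_jfields() automates for whole tables of classes, methods and fields.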
32,914
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
32,914
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/* * Copyright (C) 2014 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef THIRD_PARTY_BLINK_RENDERER_CORE_SVG_SVG_POINT_LIST_H_ #define THIRD_PARTY_BLINK_RENDERER_CORE_SVG_SVG_POINT_LIST_H_ #include "third_party/blink/renderer/core/svg/properties/svg_list_property_helper.h" #include "third_party/blink/renderer/core/svg/svg_parsing_error.h" #include "third_party/blink/renderer/core/svg/svg_point.h" namespace blink { class SVGPointListTearOff; class SVGPointList final : public SVGListPropertyHelper<SVGPointList, SVGPoint> { public: typedef SVGPointListTearOff TearOffType; static SVGPointList* Create() { return new SVGPointList(); } ~SVGPointList() override; SVGParsingError SetValueAsString(const String&); // SVGPropertyBase: String ValueAsString() const override; void Add(SVGPropertyBase*, SVGElement*) override; void CalculateAnimatedValue(SVGAnimationElement*, float percentage, unsigned repeat_count, SVGPropertyBase* from_value, SVGPropertyBase* to_value, SVGPropertyBase* to_at_end_of_duration_value, SVGElement*) override; float CalculateDistance(SVGPropertyBase* to, SVGElement*) override; static AnimatedPropertyType ClassType() { return kAnimatedPoints; } AnimatedPropertyType GetType() const override { return ClassType(); } private: SVGPointList(); template <typename CharType> SVGParsingError Parse(const CharType*& ptr, const CharType* end); }; DEFINE_SVG_PROPERTY_TYPE_CASTS(SVGPointList); } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_CORE_SVG_SVG_POINT_LIST_H_
null
null
null
null
29,777
69,584
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
69,584
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/* * ------------------------------------------------------------- * * Module: sem_post.c * * Purpose: * Semaphores aren't actually part of the PThreads standard. * They are defined by the POSIX Standard: * * POSIX 1003.1b-1993 (POSIX.1b) * * ------------------------------------------------------------- * * -------------------------------------------------------------------------- * * Pthreads-win32 - POSIX Threads Library for Win32 * Copyright(C) 1998 John E. Bossom * Copyright(C) 1999,2005 Pthreads-win32 contributors * * Contact Email: rpj@callisto.canberra.edu.au * * The current list of contributors is contained * in the file CONTRIBUTORS included with the source * code distribution. The list can also be seen at the * following World Wide Web location: * http://sources.redhat.com/pthreads-win32/contributors.html * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library in the file COPYING.LIB; * if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ #include "pthread.h" #include "semaphore.h" #include "implement.h" int sem_post (sem_t * sem) /* * ------------------------------------------------------ * DOCPUBLIC * This function posts a wakeup to a semaphore. * * PARAMETERS * sem * pointer to an instance of sem_t * * DESCRIPTION * This function posts a wakeup to a semaphore. If there * are waiting threads (or processes), one is awakened; * otherwise, the semaphore value is incremented by one. * * RESULTS * 0 successfully posted semaphore, * -1 failed, error in errno * ERRNO * EINVAL 'sem' is not a valid semaphore, * ENOSYS semaphores are not supported, * ERANGE semaphore count is too big * * ------------------------------------------------------ */ { int result = 0; sem_t s = *sem; if (s == NULL) { result = EINVAL; } else if ((result = pthread_mutex_lock (&s->lock)) == 0) { /* See sem_destroy.c */ if (*sem == NULL) { (void) pthread_mutex_unlock (&s->lock); result = EINVAL; return -1; } if (s->value < SEM_VALUE_MAX) { #if defined(NEED_SEM) if (++s->value <= 0 && !SetEvent(s->sem)) { s->value--; result = EINVAL; } #else if (++s->value <= 0 && !ReleaseSemaphore (s->sem, 1, NULL)) { s->value--; result = EINVAL; } #endif /* NEED_SEM */ } else { result = ERANGE; } (void) pthread_mutex_unlock (&s->lock); } if (result != 0) { errno = result; return -1; } return 0; } /* sem_post */
null
null
null
null
66,447
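A usage note on the preceding record: the pthreads-win32 sem_post() above documents the POSIX contract that a post either wakes one blocked waiter or, if nobody is waiting, increments the semaphore count. The minimal producer/consumer sketch below shows that contract in ordinary POSIX C; it is illustrative only and not part of the pthreads-win32 sources.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t items;

static void *producer(void *arg)
{
    (void)arg;
    for (int i = 0; i < 3; i++) {
        printf("producing item %d\n", i);
        sem_post(&items);          /* wake one waiter, or bump the count */
    }
    return NULL;
}

int main(void)
{
    pthread_t t;
    sem_init(&items, 0, 0);        /* process-private, initial count 0 */
    pthread_create(&t, NULL, producer, NULL);

    for (int i = 0; i < 3; i++) {
        sem_wait(&items);          /* blocks until a corresponding sem_post() */
        printf("consumed item %d\n", i);
    }

    pthread_join(t, NULL);
    sem_destroy(&items);
    return 0;
}

Because the count starts at 0, each sem_wait() in main() blocks until the producer thread has posted a matching item.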
40,882
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
205,877
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef _LINUX_FD_H
#define _LINUX_FD_H

#include <uapi/linux/fd.h>

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

struct compat_floppy_struct {
	compat_uint_t	size;
	compat_uint_t	sect;
	compat_uint_t	head;
	compat_uint_t	track;
	compat_uint_t	stretch;
	unsigned char	gap;
	unsigned char	rate;
	unsigned char	spec1;
	unsigned char	fmt_gap;
	const compat_caddr_t name;
};

#define FDGETPRM32 _IOR(2, 0x04, struct compat_floppy_struct)
#endif
#endif
null
null
null
null
114,224
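A usage note on the preceding record: the compat_floppy_struct and FDGETPRM32 definitions exist so a 64-bit kernel can translate the FDGETPRM ioctl issued by 32-bit userspace, whose struct floppy_struct carries a 32-bit pointer in its name field. A native caller simply uses the un-suffixed ioctl, roughly as sketched below; the /dev/fd0 device node is an assumption of the example.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fd.h>

int main(void)
{
    struct floppy_struct geom;
    /* O_NONBLOCK lets the open succeed even with no media inserted. */
    int fd = open("/dev/fd0", O_RDONLY | O_NONBLOCK);
    if (fd < 0) {
        perror("open /dev/fd0");
        return 1;
    }
    if (ioctl(fd, FDGETPRM, &geom) == 0)
        printf("size=%u sectors, %u sect/track, %u heads, %u tracks\n",
               geom.size, geom.sect, geom.head, geom.track);
    else
        perror("FDGETPRM");
    close(fd);
    return 0;
}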
25,787
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
25,787
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "extensions/browser/api/web_request/web_request_event_details.h" #include <utility> #include <vector> #include "base/callback.h" #include "base/memory/ptr_util.h" #include "base/strings/string_number_conversions.h" #include "content/public/browser/browser_thread.h" #include "content/public/browser/render_frame_host.h" #include "content/public/browser/resource_request_info.h" #include "content/public/common/child_process_host.h" #include "extensions/browser/api/extensions_api_client.h" #include "extensions/browser/api/web_request/upload_data_presenter.h" #include "extensions/browser/api/web_request/web_request_api_constants.h" #include "extensions/browser/api/web_request/web_request_api_helpers.h" #include "extensions/browser/api/web_request/web_request_info.h" #include "extensions/browser/api/web_request/web_request_permissions.h" #include "extensions/browser/api/web_request/web_request_resource_type.h" #include "extensions/common/permissions/permissions_data.h" #include "ipc/ipc_message.h" #include "net/base/auth.h" #include "net/base/upload_data_stream.h" #include "net/http/http_request_headers.h" #include "net/http/http_response_headers.h" using extension_web_request_api_helpers::ExtraInfoSpec; namespace helpers = extension_web_request_api_helpers; namespace keys = extension_web_request_api_constants; namespace extensions { WebRequestEventDetails::WebRequestEventDetails(const WebRequestInfo& request, int extra_info_spec) : extra_info_spec_(extra_info_spec), render_process_id_(content::ChildProcessHost::kInvalidUniqueID), render_frame_id_(MSG_ROUTING_NONE) { dict_.SetString(keys::kMethodKey, request.method); dict_.SetString(keys::kRequestIdKey, base::NumberToString(request.id)); dict_.SetDouble(keys::kTimeStampKey, base::Time::Now().ToDoubleT() * 1000); dict_.SetString(keys::kTypeKey, WebRequestResourceTypeToString(request.web_request_type)); dict_.SetString(keys::kUrlKey, request.url.spec()); initiator_ = request.initiator; render_process_id_ = request.render_process_id; render_frame_id_ = request.frame_id; } WebRequestEventDetails::~WebRequestEventDetails() = default; void WebRequestEventDetails::SetRequestBody(WebRequestInfo* request) { if (!(extra_info_spec_ & ExtraInfoSpec::REQUEST_BODY)) return; request_body_ = std::move(request->request_body_data); } void WebRequestEventDetails::SetRequestHeaders( const net::HttpRequestHeaders& request_headers) { if (!(extra_info_spec_ & ExtraInfoSpec::REQUEST_HEADERS)) return; base::ListValue* headers = new base::ListValue(); for (net::HttpRequestHeaders::Iterator it(request_headers); it.GetNext();) headers->Append(helpers::CreateHeaderDictionary(it.name(), it.value())); request_headers_.reset(headers); } void WebRequestEventDetails::SetAuthInfo( const net::AuthChallengeInfo& auth_info) { dict_.SetBoolean(keys::kIsProxyKey, auth_info.is_proxy); if (!auth_info.scheme.empty()) dict_.SetString(keys::kSchemeKey, auth_info.scheme); if (!auth_info.realm.empty()) dict_.SetString(keys::kRealmKey, auth_info.realm); auto challenger = std::make_unique<base::DictionaryValue>(); challenger->SetString(keys::kHostKey, auth_info.challenger.host()); challenger->SetInteger(keys::kPortKey, auth_info.challenger.port()); dict_.Set(keys::kChallengerKey, std::move(challenger)); } void WebRequestEventDetails::SetResponseHeaders( const WebRequestInfo& request, const net::HttpResponseHeaders* 
response_headers) { if (!response_headers) { // Not all URLRequestJobs specify response headers. E.g. URLRequestFTPJob, // URLRequestFileJob and some redirects. dict_.SetInteger(keys::kStatusCodeKey, request.response_code); dict_.SetString(keys::kStatusLineKey, ""); } else { dict_.SetInteger(keys::kStatusCodeKey, response_headers->response_code()); dict_.SetString(keys::kStatusLineKey, response_headers->GetStatusLine()); } if (extra_info_spec_ & ExtraInfoSpec::RESPONSE_HEADERS) { base::ListValue* headers = new base::ListValue(); if (response_headers) { size_t iter = 0; std::string name; std::string value; while (response_headers->EnumerateHeaderLines(&iter, &name, &value)) { if (ExtensionsAPIClient::Get()->ShouldHideResponseHeader(request.url, name)) { continue; } headers->Append(helpers::CreateHeaderDictionary(name, value)); } } response_headers_.reset(headers); } } void WebRequestEventDetails::SetResponseSource(const WebRequestInfo& request) { dict_.SetBoolean(keys::kFromCache, request.response_from_cache); if (!request.response_ip.empty()) dict_.SetString(keys::kIpKey, request.response_ip); } void WebRequestEventDetails::SetFrameData( const ExtensionApiFrameIdMap::FrameData& frame_data) { dict_.SetInteger(keys::kTabIdKey, frame_data.tab_id); dict_.SetInteger(keys::kFrameIdKey, frame_data.frame_id); dict_.SetInteger(keys::kParentFrameIdKey, frame_data.parent_frame_id); } void WebRequestEventDetails::DetermineFrameDataOnUI() { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); content::RenderFrameHost* rfh = content::RenderFrameHost::FromID(render_process_id_, render_frame_id_); ExtensionApiFrameIdMap::FrameData frame_data = ExtensionApiFrameIdMap::Get()->GetFrameData(rfh); SetFrameData(frame_data); } void WebRequestEventDetails::DetermineFrameDataOnIO( const DeterminedFrameDataCallback& callback) { std::unique_ptr<WebRequestEventDetails> self(this); ExtensionApiFrameIdMap::Get()->GetFrameDataOnIO( render_process_id_, render_frame_id_, base::Bind(&WebRequestEventDetails::OnDeterminedFrameData, base::Unretained(this), base::Passed(&self), callback)); } std::unique_ptr<base::DictionaryValue> WebRequestEventDetails::GetFilteredDict( int extra_info_spec, const extensions::InfoMap* extension_info_map, const extensions::ExtensionId& extension_id, bool crosses_incognito) const { std::unique_ptr<base::DictionaryValue> result = dict_.CreateDeepCopy(); if ((extra_info_spec & ExtraInfoSpec::REQUEST_BODY) && request_body_) { result->SetKey(keys::kRequestBodyKey, request_body_->Clone()); } if ((extra_info_spec & ExtraInfoSpec::REQUEST_HEADERS) && request_headers_) { result->SetKey(keys::kRequestHeadersKey, request_headers_->Clone()); } if ((extra_info_spec & ExtraInfoSpec::RESPONSE_HEADERS) && response_headers_) { result->SetKey(keys::kResponseHeadersKey, response_headers_->Clone()); } // Only listeners with a permission for the initiator should recieve it. 
if (extension_info_map && initiator_) { int tab_id = -1; dict_.GetInteger(keys::kTabIdKey, &tab_id); if (initiator_->unique() || WebRequestPermissions::CanExtensionAccessInitiator( extension_info_map, extension_id, initiator_, tab_id, crosses_incognito)) { result->SetString(keys::kInitiatorKey, initiator_->Serialize()); } } return result; } std::unique_ptr<base::DictionaryValue> WebRequestEventDetails::GetAndClearDict() { std::unique_ptr<base::DictionaryValue> result(new base::DictionaryValue); dict_.Swap(result.get()); return result; } std::unique_ptr<WebRequestEventDetails> WebRequestEventDetails::CreatePublicSessionCopy() { std::unique_ptr<WebRequestEventDetails> copy(new WebRequestEventDetails); copy->initiator_ = initiator_; copy->render_process_id_ = render_process_id_; copy->render_frame_id_ = render_frame_id_; static const char* const kSafeAttributes[] = { "method", "requestId", "timeStamp", "type", "tabId", "frameId", "parentFrameId", "fromCache", "error", "ip", "statusLine", "statusCode" }; for (const char* safe_attr : kSafeAttributes) { base::Value* val = dict_.FindKey(safe_attr); if (val) copy->dict_.SetKey(safe_attr, val->Clone()); } // URL is stripped down to the origin. std::string url; dict_.GetString(keys::kUrlKey, &url); GURL gurl(url); copy->dict_.SetString(keys::kUrlKey, gurl.GetOrigin().spec()); return copy; } WebRequestEventDetails::WebRequestEventDetails() : extra_info_spec_(0), render_process_id_(0), render_frame_id_(0) {} void WebRequestEventDetails::OnDeterminedFrameData( std::unique_ptr<WebRequestEventDetails> self, const DeterminedFrameDataCallback& callback, const ExtensionApiFrameIdMap::FrameData& frame_data) { SetFrameData(frame_data); callback.Run(std::move(self)); } } // namespace extensions
null
null
null
null
22,650
40,166
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
40,166
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // A crazy linker test to test callbacks for delayed execution. #include <pthread.h> #include <stdio.h> #include <crazy_linker.h> #include "test_util.h" namespace { typedef void (*FunctionPtr)(); // Data block passed between callback poster and callback handler. class CallbackData { public: CallbackData() { callback_.handler = NULL; callback_.opaque = NULL; pthread_mutex_init(&mutex_, NULL); pthread_cond_init(&cond_, NULL); } crazy_callback_t callback_; pthread_mutex_t mutex_; pthread_cond_t cond_; }; bool PostCallback(crazy_callback_t* callback, void* poster_opaque) { printf("Post callback, poster_opaque %p, handler %p, opaque %p\n", poster_opaque, callback->handler, callback->opaque); CallbackData* callback_data = reinterpret_cast<CallbackData*>(poster_opaque); // Set callback_ and signal the arrival of the PostCallback() call. pthread_mutex_lock(&callback_data->mutex_); callback_data->callback_ = *callback; pthread_cond_signal(&callback_data->cond_); pthread_mutex_unlock(&callback_data->mutex_); return true; } void CheckAndRunCallback(CallbackData* callback_data) { printf("Run callback, callback_data %p\n", callback_data); if (!callback_data->callback_.handler) { Panic("Post for delayed execution not invoked\n"); } // Run the callback, then clear it. crazy_callback_run(&callback_data->callback_); callback_data->callback_.handler = NULL; callback_data->callback_.opaque = NULL; } struct ThreadData { crazy_library_t* library; crazy_context_t* context; }; void* ThreadBody(void *thread_arg) { const ThreadData* thread_data = reinterpret_cast<ThreadData*>(thread_arg); // Close the library, asynchronously. crazy_library_close_with_context(thread_data->library, thread_data->context); pthread_exit(NULL); } pthread_t AsyncCrazyLibraryCloseWithContext(crazy_library_t* library, crazy_context_t* context, CallbackData* callback_data) { printf("Async close, library %p, context %p\n", library, context); ThreadData thread_data = {library, context}; void* thread_arg = reinterpret_cast<void*>(&thread_data); // Clear the indication that the new thread has called PostCallback(). pthread_mutex_lock(&callback_data->mutex_); callback_data->callback_.handler = NULL; callback_data->callback_.opaque = NULL; pthread_mutex_unlock(&callback_data->mutex_); // Start the thread that closes the library. pthread_t thread; if (pthread_create(&thread, NULL, ThreadBody, thread_arg) != 0) { Panic("Failed to create thread for close\n"); } // Wait for the library close to call PostCallback() before returning. printf("Waiting for PostCallback() call\n"); pthread_mutex_lock(&callback_data->mutex_); while (!callback_data->callback_.handler) { pthread_cond_wait(&callback_data->cond_, &callback_data->mutex_); } pthread_mutex_unlock(&callback_data->mutex_); printf("Done waiting for PostCallback() call\n"); return thread; } } // namespace #define LIB_NAME "libcrazy_linker_tests_libfoo.so" int main() { crazy_context_t* context = crazy_context_create(); crazy_library_t* library; // DEBUG crazy_context_set_load_address(context, 0x20000000); // Set a callback poster. CallbackData callback_data; crazy_context_set_callback_poster(context, &PostCallback, &callback_data); crazy_callback_poster_t poster; void* poster_opaque; // Check that the API returns the values we set. 
crazy_context_get_callback_poster(context, &poster, &poster_opaque); if (poster != &PostCallback || poster_opaque != &callback_data) { Panic("Get callback poster error\n"); } // Load library if (!crazy_library_open(&library, LIB_NAME, context)) { Panic("Could not open library: %s\n", crazy_context_get_error(context)); } CheckAndRunCallback(&callback_data); // Find the "Foo" symbol. FunctionPtr foo_func; if (!crazy_library_find_symbol( library, "Foo", reinterpret_cast<void**>(&foo_func))) { Panic("Could not find 'Foo' in %s\n", LIB_NAME); } // Call it. (*foo_func)(); // Close the library. Because the close operation will wait for the // callback before returning, we have to run it in a separate thread, and // wait for it to call PostCallback() before continuing. pthread_t thread = AsyncCrazyLibraryCloseWithContext(library, context, &callback_data); CheckAndRunCallback(&callback_data); if (pthread_join(thread, NULL) != 0) { Panic("Failed to join thread for close\n"); } crazy_context_destroy(context); return 0; }
null
null
null
null
37,029
67,636
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
67,636
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/safe_browsing/download_protection/two_phase_uploader.h" #include <stdint.h> #include <limits> #include "base/bind.h" #include "base/macros.h" #include "base/memory/ptr_util.h" #include "base/task_runner.h" #include "content/public/browser/browser_thread.h" #include "net/base/net_errors.h" #include "net/http/http_response_headers.h" #include "net/url_request/url_fetcher.h" #include "net/url_request/url_fetcher_delegate.h" #include "net/url_request/url_request_status.h" #include "services/network/public/cpp/shared_url_loader_factory.h" #include "services/network/public/cpp/simple_url_loader.h" namespace { // Header sent on initial request to start the two phase upload process. const char kStartHeader[] = "x-goog-resumable: start"; // Header returned on initial response with URL to use for the second phase. const char kLocationHeader[] = "Location"; const char kUploadContentType[] = "application/octet-stream"; class TwoPhaseUploaderImpl : public TwoPhaseUploader { public: TwoPhaseUploaderImpl( scoped_refptr<network::SharedURLLoaderFactory> url_loader_factory, base::TaskRunner* file_task_runner, const GURL& base_url, const std::string& metadata, const base::FilePath& file_path, const FinishCallback& finish_callback, const net::NetworkTrafficAnnotationTag& traffic_annotation); ~TwoPhaseUploaderImpl() override; // Begins the upload process. void Start() override; void OnURLLoaderComplete(std::unique_ptr<std::string> response_body); private: void UploadMetadata(); void UploadFile(); void Finish(int net_error, int response_code, const std::string& response); State state_; scoped_refptr<network::SharedURLLoaderFactory> url_loader_factory_; scoped_refptr<base::TaskRunner> file_task_runner_; GURL base_url_; GURL upload_url_; std::string metadata_; const base::FilePath file_path_; FinishCallback finish_callback_; net::NetworkTrafficAnnotationTag traffic_annotation_; std::unique_ptr<network::SimpleURLLoader> url_loader_; DISALLOW_COPY_AND_ASSIGN(TwoPhaseUploaderImpl); }; TwoPhaseUploaderImpl::TwoPhaseUploaderImpl( scoped_refptr<network::SharedURLLoaderFactory> url_loader_factory, base::TaskRunner* file_task_runner, const GURL& base_url, const std::string& metadata, const base::FilePath& file_path, const FinishCallback& finish_callback, const net::NetworkTrafficAnnotationTag& traffic_annotation) : state_(STATE_NONE), url_loader_factory_(url_loader_factory), file_task_runner_(file_task_runner), base_url_(base_url), metadata_(metadata), file_path_(file_path), finish_callback_(finish_callback), traffic_annotation_(traffic_annotation) { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); } TwoPhaseUploaderImpl::~TwoPhaseUploaderImpl() { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); } void TwoPhaseUploaderImpl::Start() { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); DCHECK_EQ(STATE_NONE, state_); UploadMetadata(); } void TwoPhaseUploaderImpl::OnURLLoaderComplete( std::unique_ptr<std::string> response_body) { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); int response_code = 0; if (url_loader_->ResponseInfo() && url_loader_->ResponseInfo()->headers) response_code = url_loader_->ResponseInfo()->headers->response_code(); DVLOG(1) << __func__ << " " << url_loader_->GetFinalURL().spec() << " " << url_loader_->NetError() << " " << response_code; if (url_loader_->NetError() != net::OK) { LOG(ERROR) << "URLFetcher failed, 
err=" << url_loader_->NetError(); Finish(url_loader_->NetError(), response_code, std::string()); return; } switch (state_) { case UPLOAD_METADATA: { if (response_code != 201) { LOG(ERROR) << "Invalid response to initial request: " << response_code; Finish(net::OK, response_code, *response_body.get()); return; } std::string location; if (!url_loader_->ResponseInfo() || !url_loader_->ResponseInfo()->headers || !url_loader_->ResponseInfo()->headers->EnumerateHeader( nullptr, kLocationHeader, &location)) { LOG(ERROR) << "no location header"; Finish(net::OK, response_code, std::string()); return; } DVLOG(1) << "upload location: " << location; upload_url_ = GURL(location); UploadFile(); break; } case UPLOAD_FILE: if (response_code != 200) { LOG(ERROR) << "Invalid response to upload request: " << response_code; } else { state_ = STATE_SUCCESS; } Finish(net::OK, response_code, *response_body.get()); return; default: NOTREACHED(); } } void TwoPhaseUploaderImpl::UploadMetadata() { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); state_ = UPLOAD_METADATA; auto resource_request = std::make_unique<network::ResourceRequest>(); resource_request->url = base_url_; resource_request->method = "POST"; resource_request->headers.AddHeadersFromString(kStartHeader); url_loader_ = network::SimpleURLLoader::Create(std::move(resource_request), traffic_annotation_); url_loader_->SetAllowHttpErrorResults(true); url_loader_->AttachStringForUpload(metadata_, kUploadContentType); url_loader_->DownloadToStringOfUnboundedSizeUntilCrashAndDie( url_loader_factory_.get(), base::BindOnce(&TwoPhaseUploaderImpl::OnURLLoaderComplete, base::Unretained(this))); } void TwoPhaseUploaderImpl::UploadFile() { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); state_ = UPLOAD_FILE; auto resource_request = std::make_unique<network::ResourceRequest>(); resource_request->url = upload_url_; resource_request->method = "PUT"; url_loader_ = network::SimpleURLLoader::Create(std::move(resource_request), traffic_annotation_); url_loader_->SetAllowHttpErrorResults(true); url_loader_->AttachFileForUpload(file_path_, kUploadContentType); url_loader_->DownloadToStringOfUnboundedSizeUntilCrashAndDie( url_loader_factory_.get(), base::BindOnce(&TwoPhaseUploaderImpl::OnURLLoaderComplete, base::Unretained(this))); } void TwoPhaseUploaderImpl::Finish(int net_error, int response_code, const std::string& response) { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); finish_callback_.Run(state_, net_error, response_code, response); } } // namespace // static TwoPhaseUploaderFactory* TwoPhaseUploader::factory_ = nullptr; // static std::unique_ptr<TwoPhaseUploader> TwoPhaseUploader::Create( scoped_refptr<network::SharedURLLoaderFactory> url_loader_factory, base::TaskRunner* file_task_runner, const GURL& base_url, const std::string& metadata, const base::FilePath& file_path, const FinishCallback& finish_callback, const net::NetworkTrafficAnnotationTag& traffic_annotation) { if (!factory_) { return base::WrapUnique(new TwoPhaseUploaderImpl( url_loader_factory, file_task_runner, base_url, metadata, file_path, finish_callback, traffic_annotation)); } return TwoPhaseUploader::factory_->CreateTwoPhaseUploader( url_loader_factory, file_task_runner, base_url, metadata, file_path, finish_callback, traffic_annotation); }
null
null
null
null
64,499
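A protocol note on the preceding record: TwoPhaseUploaderImpl encodes the two-phase upload in its state machine. Phase one POSTs the metadata to the base URL with an "x-goog-resumable: start" header and expects a 201 response carrying a Location header; phase two PUTs the file body to that location and expects 200. The libcurl sketch below replays the same exchange purely for illustration; Chromium itself uses SimpleURLLoader, and the URL, metadata string and file name here are placeholders.

#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <curl/curl.h>

static char upload_url[2048];

/* Capture the Location header from the phase-one response. */
static size_t on_header(char *buf, size_t size, size_t nitems, void *userdata)
{
    size_t len = size * nitems;
    (void)userdata;
    if (len > 9 && strncasecmp(buf, "Location:", 9) == 0) {
        size_t start = 9, end = len;
        while (start < end && (buf[start] == ' ' || buf[start] == '\t'))
            start++;
        while (end > start && (buf[end - 1] == '\r' || buf[end - 1] == '\n'))
            end--;
        size_t n = end - start;
        if (n >= sizeof(upload_url))
            n = sizeof(upload_url) - 1;
        memcpy(upload_url, buf + start, n);
        upload_url[n] = '\0';
    }
    return len;
}

int main(void)
{
    const char *metadata = "example metadata";  /* placeholder payload */
    long code = 0;
    CURL *curl;
    struct curl_slist *headers = NULL;
    FILE *f;

    curl_global_init(CURL_GLOBAL_DEFAULT);

    /* Phase 1: POST the metadata, asking the server to start a resumable upload. */
    curl = curl_easy_init();
    if (!curl)
        return 1;
    headers = curl_slist_append(headers, "x-goog-resumable: start");
    headers = curl_slist_append(headers, "Content-Type: application/octet-stream");
    curl_easy_setopt(curl, CURLOPT_URL, "https://upload.example.com/start");  /* placeholder */
    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
    curl_easy_setopt(curl, CURLOPT_POSTFIELDS, metadata);
    curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, on_header);
    if (curl_easy_perform(curl) == CURLE_OK)
        curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code);
    curl_easy_cleanup(curl);
    curl_slist_free_all(headers);

    if (code != 201 || upload_url[0] == '\0') {
        fprintf(stderr, "phase 1 failed (HTTP %ld)\n", code);
        return 1;
    }

    /* Phase 2: PUT the file body to the Location returned by phase 1. A real
     * client would also set CURLOPT_INFILESIZE_LARGE; error handling is
     * trimmed to keep the sketch short. */
    f = fopen("payload.bin", "rb");  /* placeholder file */
    if (!f)
        return 1;
    code = 0;
    curl = curl_easy_init();
    if (!curl)
        return 1;
    curl_easy_setopt(curl, CURLOPT_URL, upload_url);
    curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);  /* issues a PUT */
    curl_easy_setopt(curl, CURLOPT_READDATA, f);
    if (curl_easy_perform(curl) == CURLE_OK)
        curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code);
    curl_easy_cleanup(curl);
    fclose(f);

    curl_global_cleanup();
    return code == 200 ? 0 : 1;
}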
28,730
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
193,725
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Atmel AT91 AIC (Advanced Interrupt Controller) driver * * Copyright (C) 2004 SAN People * Copyright (C) 2004 ATMEL * Copyright (C) Rick Bronson * Copyright (C) 2014 Free Electrons * * Author: Boris BREZILLON <boris.brezillon@free-electrons.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/bitmap.h> #include <linux/types.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/irqdomain.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/io.h> #include <asm/exception.h> #include <asm/mach/irq.h> #include "irq-atmel-aic-common.h" /* Number of irq lines managed by AIC */ #define NR_AIC_IRQS 32 #define AT91_AIC_SMR(n) ((n) * 4) #define AT91_AIC_SVR(n) (0x80 + ((n) * 4)) #define AT91_AIC_IVR 0x100 #define AT91_AIC_FVR 0x104 #define AT91_AIC_ISR 0x108 #define AT91_AIC_IPR 0x10c #define AT91_AIC_IMR 0x110 #define AT91_AIC_CISR 0x114 #define AT91_AIC_IECR 0x120 #define AT91_AIC_IDCR 0x124 #define AT91_AIC_ICCR 0x128 #define AT91_AIC_ISCR 0x12c #define AT91_AIC_EOICR 0x130 #define AT91_AIC_SPU 0x134 #define AT91_AIC_DCR 0x138 static struct irq_domain *aic_domain; static asmlinkage void __exception_irq_entry aic_handle(struct pt_regs *regs) { struct irq_domain_chip_generic *dgc = aic_domain->gc; struct irq_chip_generic *gc = dgc->gc[0]; u32 irqnr; u32 irqstat; irqnr = irq_reg_readl(gc, AT91_AIC_IVR); irqstat = irq_reg_readl(gc, AT91_AIC_ISR); if (!irqstat) irq_reg_writel(gc, 0, AT91_AIC_EOICR); else handle_domain_irq(aic_domain, irqnr, regs); } static int aic_retrigger(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); /* Enable interrupt on AIC5 */ irq_gc_lock(gc); irq_reg_writel(gc, d->mask, AT91_AIC_ISCR); irq_gc_unlock(gc); return 0; } static int aic_set_type(struct irq_data *d, unsigned type) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); unsigned int smr; int ret; smr = irq_reg_readl(gc, AT91_AIC_SMR(d->hwirq)); ret = aic_common_set_type(d, type, &smr); if (ret) return ret; irq_reg_writel(gc, smr, AT91_AIC_SMR(d->hwirq)); return 0; } #ifdef CONFIG_PM static void aic_suspend(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); irq_gc_lock(gc); irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IDCR); irq_reg_writel(gc, gc->wake_active, AT91_AIC_IECR); irq_gc_unlock(gc); } static void aic_resume(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); irq_gc_lock(gc); irq_reg_writel(gc, gc->wake_active, AT91_AIC_IDCR); irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IECR); irq_gc_unlock(gc); } static void aic_pm_shutdown(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); irq_gc_lock(gc); irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR); irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR); irq_gc_unlock(gc); } #else #define aic_suspend NULL #define aic_resume NULL #define aic_pm_shutdown NULL #endif /* CONFIG_PM */ static void __init aic_hw_init(struct irq_domain *domain) { struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0); int i; /* * Perform 8 End Of Interrupt Command to make sure AIC * will not Lock out nIRQ */ for (i = 0; i < 8; i++) irq_reg_writel(gc, 0, AT91_AIC_EOICR); /* * Spurious Interrupt ID in Spurious Vector 
Register. * When there is no current interrupt, the IRQ Vector Register * reads the value stored in AIC_SPU */ irq_reg_writel(gc, 0xffffffff, AT91_AIC_SPU); /* No debugging in AIC: Debug (Protect) Control Register */ irq_reg_writel(gc, 0, AT91_AIC_DCR); /* Disable and clear all interrupts initially */ irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR); irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR); for (i = 0; i < 32; i++) irq_reg_writel(gc, i, AT91_AIC_SVR(i)); } static int aic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type) { struct irq_domain_chip_generic *dgc = d->gc; struct irq_chip_generic *gc; unsigned long flags; unsigned smr; int idx; int ret; if (!dgc) return -EINVAL; ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize, out_hwirq, out_type); if (ret) return ret; idx = intspec[0] / dgc->irqs_per_chip; if (idx >= dgc->num_chips) return -EINVAL; gc = dgc->gc[idx]; irq_gc_lock_irqsave(gc, flags); smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq)); aic_common_set_priority(intspec[2], &smr); irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq)); irq_gc_unlock_irqrestore(gc, flags); return ret; } static const struct irq_domain_ops aic_irq_ops = { .map = irq_map_generic_chip, .xlate = aic_irq_domain_xlate, }; static void __init at91rm9200_aic_irq_fixup(struct device_node *root) { aic_common_rtc_irq_fixup(root); } static void __init at91sam9260_aic_irq_fixup(struct device_node *root) { aic_common_rtt_irq_fixup(root); } static void __init at91sam9g45_aic_irq_fixup(struct device_node *root) { aic_common_rtc_irq_fixup(root); aic_common_rtt_irq_fixup(root); } static const struct of_device_id aic_irq_fixups[] __initconst = { { .compatible = "atmel,at91rm9200", .data = at91rm9200_aic_irq_fixup }, { .compatible = "atmel,at91sam9g45", .data = at91sam9g45_aic_irq_fixup }, { .compatible = "atmel,at91sam9n12", .data = at91rm9200_aic_irq_fixup }, { .compatible = "atmel,at91sam9rl", .data = at91sam9g45_aic_irq_fixup }, { .compatible = "atmel,at91sam9x5", .data = at91rm9200_aic_irq_fixup }, { .compatible = "atmel,at91sam9260", .data = at91sam9260_aic_irq_fixup }, { .compatible = "atmel,at91sam9261", .data = at91sam9260_aic_irq_fixup }, { .compatible = "atmel,at91sam9263", .data = at91sam9260_aic_irq_fixup }, { .compatible = "atmel,at91sam9g20", .data = at91sam9260_aic_irq_fixup }, { /* sentinel */ }, }; static int __init aic_of_init(struct device_node *node, struct device_node *parent) { struct irq_chip_generic *gc; struct irq_domain *domain; if (aic_domain) return -EEXIST; domain = aic_common_of_init(node, &aic_irq_ops, "atmel-aic", NR_AIC_IRQS, aic_irq_fixups); if (IS_ERR(domain)) return PTR_ERR(domain); aic_domain = domain; gc = irq_get_domain_generic_chip(domain, 0); gc->chip_types[0].regs.eoi = AT91_AIC_EOICR; gc->chip_types[0].regs.enable = AT91_AIC_IECR; gc->chip_types[0].regs.disable = AT91_AIC_IDCR; gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg; gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg; gc->chip_types[0].chip.irq_retrigger = aic_retrigger; gc->chip_types[0].chip.irq_set_type = aic_set_type; gc->chip_types[0].chip.irq_suspend = aic_suspend; gc->chip_types[0].chip.irq_resume = aic_resume; gc->chip_types[0].chip.irq_pm_shutdown = aic_pm_shutdown; aic_hw_init(domain); set_handle_irq(aic_handle); return 0; } IRQCHIP_DECLARE(at91rm9200_aic, "atmel,at91rm9200-aic", aic_of_init);
null
null
null
null
102,072
33,961
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
33,961
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "third_party/blink/renderer/core/layout/ng/ng_physical_container_fragment.h" #include "third_party/blink/renderer/core/editing/position_with_affinity.h" #include "third_party/blink/renderer/core/layout/layout_object.h" #include "third_party/blink/renderer/core/layout/ng/geometry/ng_logical_offset.h" #include "third_party/blink/renderer/core/layout/ng/geometry/ng_logical_size.h" #include "third_party/blink/renderer/core/layout/ng/inline/ng_physical_line_box_fragment.h" #include "third_party/blink/renderer/core/layout/ng/ng_fragment.h" #include "third_party/blink/renderer/core/layout/ng/ng_physical_box_fragment.h" #include "third_party/blink/renderer/core/style/computed_style.h" namespace blink { namespace { NGLogicalOffset ChildLogicalOffsetInParent( const NGPhysicalContainerFragment& parent, const NGPhysicalFragment& child) { return child.Offset().ConvertToLogical(parent.Style().GetWritingMode(), parent.Style().Direction(), parent.Size(), child.Size()); } NGLogicalSize ChildLogicalSizeInParent( const NGPhysicalContainerFragment& parent, const NGPhysicalFragment& child) { return NGFragment(parent.Style().GetWritingMode(), child).Size(); } Optional<PositionWithAffinity> PositionForPointInChild( const NGPhysicalFragment& child, const NGPhysicalOffset& point) { const NGPhysicalOffset& child_point = point - child.Offset(); // We must fallback to legacy for old layout roots. We also fallback (to // LayoutNGMixin::PositionForPoint()) for NG block layout, so that we can // utilize LayoutBlock::PositionForPoint() that resolves the position in block // layout. // TODO(xiaochengh): Don't fallback to legacy for NG block layout. const PositionWithAffinity result = (child.IsBlockFlow() || child.IsOldLayoutRoot()) ? child.GetLayoutObject()->PositionForPoint( child_point.ToLayoutPoint()) : child.PositionForPoint(child_point); if (result.IsNotNull()) return result; return WTF::nullopt; } } // namespace NGPhysicalContainerFragment::NGPhysicalContainerFragment( LayoutObject* layout_object, const ComputedStyle& style, NGPhysicalSize size, NGFragmentType type, unsigned sub_type, Vector<scoped_refptr<NGPhysicalFragment>>& children, const NGPhysicalOffsetRect& contents_visual_rect, scoped_refptr<NGBreakToken> break_token) : NGPhysicalFragment(layout_object, style, NGStyleVariant::kStandard, size, type, sub_type, std::move(break_token)), children_(std::move(children)), contents_visual_rect_(contents_visual_rect) { DCHECK(children.IsEmpty()); // Ensure move semantics is used. } PositionWithAffinity NGPhysicalContainerFragment::PositionForPointInInlineLevelBox( const NGPhysicalOffset& point) const { DCHECK(IsInline() || IsLineBox()) << ToString(); DCHECK(!IsBlockFlow()) << ToString(); const NGLogicalOffset logical_point = point.ConvertToLogical( Style().GetWritingMode(), Style().Direction(), Size(), NGPhysicalSize()); const LayoutUnit inline_point = logical_point.inline_offset; // Stores the closest child before |point| in the inline direction. Used if we // can't find any child |point| falls in to resolve the position. const NGPhysicalFragment* closest_child_before = nullptr; LayoutUnit closest_child_before_inline_offset = LayoutUnit::Min(); // Stores the closest child after |point| in the inline direction. Used if we // can't find any child |point| falls in to resolve the position. 
const NGPhysicalFragment* closest_child_after = nullptr; LayoutUnit closest_child_after_inline_offset = LayoutUnit::Max(); for (const auto& child : children_) { const LayoutUnit child_inline_min = ChildLogicalOffsetInParent(*this, *child).inline_offset; const LayoutUnit child_inline_max = child_inline_min + ChildLogicalSizeInParent(*this, *child).inline_size; // Try to resolve if |point| falls in any child in inline direction. if (inline_point >= child_inline_min && inline_point <= child_inline_max) { if (auto child_position = PositionForPointInChild(*child, point)) return child_position.value(); continue; } if (inline_point < child_inline_min) { if (child_inline_min < closest_child_after_inline_offset) { closest_child_after = child.get(); closest_child_after_inline_offset = child_inline_min; } } if (inline_point > child_inline_max) { if (child_inline_max > closest_child_before_inline_offset) { closest_child_before = child.get(); closest_child_before_inline_offset = child_inline_max; } } } if (closest_child_after) { if (auto child_position = PositionForPointInChild(*closest_child_after, point)) return child_position.value(); } if (closest_child_before) { if (auto child_position = PositionForPointInChild(*closest_child_before, point)) return child_position.value(); } return PositionWithAffinity(); } PositionWithAffinity NGPhysicalContainerFragment::PositionForPointInInlineFormattingContext( const NGPhysicalOffset& point) const { DCHECK(IsBlockFlow()) << ToString(); DCHECK(IsBox()) << ToString(); DCHECK(ToNGPhysicalBoxFragment(this)->ChildrenInline()) << ToString(); const NGLogicalOffset logical_point = point.ConvertToLogical( Style().GetWritingMode(), Style().Direction(), Size(), NGPhysicalSize()); const LayoutUnit block_point = logical_point.block_offset; // Stores the closest line box child above |point| in the block direction. // Used if we can't find any child |point| falls in to resolve the position. const NGPhysicalLineBoxFragment* closest_line_before = nullptr; LayoutUnit closest_line_before_block_offset = LayoutUnit::Min(); // Stores the closest line box child below |point| in the block direction. // Used if we can't find any child |point| falls in to resolve the position. const NGPhysicalLineBoxFragment* closest_line_after = nullptr; LayoutUnit closest_line_after_block_offset = LayoutUnit::Max(); for (const auto& child : children_) { // Try to resolve if |point| falls in a non-line-box child completely. if (!child->IsLineBox()) { if (point.left >= child->Offset().left && point.left <= child->Offset().left + child->Size().width && point.top >= child->Offset().top && point.top <= child->Offset().top + child->Size().height) { if (auto child_position = PositionForPointInChild(*child, point)) return child_position.value(); } continue; } if (!child->IsLineBox() || ToNGPhysicalLineBoxFragment(*child).Children().IsEmpty()) continue; const LayoutUnit line_min = ChildLogicalOffsetInParent(*this, *child).block_offset; const LayoutUnit line_max = line_min + ChildLogicalSizeInParent(*this, *child).block_size; // Try to resolve if |point| falls in a line box in block direction. // Hitting on line bottom doesn't count, to match legacy behavior. // TODO(xiaochengh): Consider floats. 
if (block_point >= line_min && block_point < line_max) { if (auto child_position = PositionForPointInChild(*child, point)) return child_position.value(); continue; } if (block_point < line_min) { if (line_min < closest_line_after_block_offset) { closest_line_after = ToNGPhysicalLineBoxFragment(child.get()); closest_line_after_block_offset = line_min; } } if (block_point >= line_max) { if (line_max > closest_line_before_block_offset) { closest_line_before = ToNGPhysicalLineBoxFragment(child.get()); closest_line_before_block_offset = line_max; } } } if (closest_line_after) { if (auto child_position = PositionForPointInChild(*closest_line_after, point)) return child_position.value(); } if (closest_line_before) { if (auto child_position = PositionForPointInChild(*closest_line_before, point)) return child_position.value(); } // TODO(xiaochengh): Looking at only the closest lines may not be enough, // when we have multiple lines full of pseudo elements. Fix it. // TODO(xiaochengh): Consider floats. return PositionWithAffinity(); } } // namespace blink
null
null
null
null
30,824
33,390
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
198,385
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* This file is provided under a dual BSD/GPLv2 license. When using or redistributing this file, you may do so under either license. GPL LICENSE SUMMARY Copyright(c) 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Contact Information: qat-linux@intel.com BSD LICENSE Copyright(c) 2014 Intel Corporation. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/mutex.h> #include <linux/slab.h> #include <linux/seq_file.h> #include "adf_accel_devices.h" #include "adf_transport_internal.h" #include "adf_transport_access_macros.h" static DEFINE_MUTEX(ring_read_lock); static DEFINE_MUTEX(bank_read_lock); static void *adf_ring_start(struct seq_file *sfile, loff_t *pos) { struct adf_etr_ring_data *ring = sfile->private; mutex_lock(&ring_read_lock); if (*pos == 0) return SEQ_START_TOKEN; if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) / ADF_MSG_SIZE_TO_BYTES(ring->msg_size))) return NULL; return ring->base_addr + (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++); } static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos) { struct adf_etr_ring_data *ring = sfile->private; if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) / ADF_MSG_SIZE_TO_BYTES(ring->msg_size))) return NULL; return ring->base_addr + (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++); } static int adf_ring_show(struct seq_file *sfile, void *v) { struct adf_etr_ring_data *ring = sfile->private; struct adf_etr_bank_data *bank = ring->bank; void __iomem *csr = ring->bank->csr_addr; if (v == SEQ_START_TOKEN) { int head, tail, empty; head = READ_CSR_RING_HEAD(csr, bank->bank_number, ring->ring_number); tail = READ_CSR_RING_TAIL(csr, bank->bank_number, ring->ring_number); empty = READ_CSR_E_STAT(csr, bank->bank_number); seq_puts(sfile, "------- Ring configuration -------\n"); seq_printf(sfile, "ring name: %s\n", ring->ring_debug->ring_name); seq_printf(sfile, "ring num %d, bank num %d\n", ring->ring_number, ring->bank->bank_number); seq_printf(sfile, "head %x, tail %x, empty: %d\n", head, tail, (empty & 1 << ring->ring_number) >> ring->ring_number); seq_printf(sfile, "ring size %d, msg size %d\n", ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size), ADF_MSG_SIZE_TO_BYTES(ring->msg_size)); seq_puts(sfile, "----------- Ring data ------------\n"); return 0; } seq_hex_dump(sfile, "", DUMP_PREFIX_ADDRESS, 32, 4, v, ADF_MSG_SIZE_TO_BYTES(ring->msg_size), false); return 0; } static void adf_ring_stop(struct seq_file *sfile, void *v) { mutex_unlock(&ring_read_lock); } static const struct seq_operations adf_ring_sops = { .start = adf_ring_start, .next = adf_ring_next, .stop = adf_ring_stop, .show = adf_ring_show }; static int adf_ring_open(struct inode *inode, struct file *file) { int ret = seq_open(file, &adf_ring_sops); if (!ret) { struct seq_file *seq_f = file->private_data; seq_f->private = inode->i_private; } return ret; } static const struct file_operations adf_ring_debug_fops = { .open = adf_ring_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release }; int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name) { struct adf_etr_ring_debug_entry *ring_debug; char entry_name[8]; ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL); if (!ring_debug) return -ENOMEM; strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name)); snprintf(entry_name, sizeof(entry_name), "ring_%02d", ring->ring_number); ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR, ring->bank->bank_debug_dir, ring, &adf_ring_debug_fops); if (!ring_debug->debug) { pr_err("QAT: Failed to create ring debug entry.\n"); kfree(ring_debug); return -EFAULT; } ring->ring_debug = ring_debug; return 0; } void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring) { if (ring->ring_debug) { debugfs_remove(ring->ring_debug->debug); kfree(ring->ring_debug); ring->ring_debug = NULL; } } static void *adf_bank_start(struct seq_file *sfile, loff_t *pos) 
{ mutex_lock(&bank_read_lock); if (*pos == 0) return SEQ_START_TOKEN; if (*pos >= ADF_ETR_MAX_RINGS_PER_BANK) return NULL; return pos; } static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos) { if (++(*pos) >= ADF_ETR_MAX_RINGS_PER_BANK) return NULL; return pos; } static int adf_bank_show(struct seq_file *sfile, void *v) { struct adf_etr_bank_data *bank = sfile->private; if (v == SEQ_START_TOKEN) { seq_printf(sfile, "------- Bank %d configuration -------\n", bank->bank_number); } else { int ring_id = *((int *)v) - 1; struct adf_etr_ring_data *ring = &bank->rings[ring_id]; void __iomem *csr = bank->csr_addr; int head, tail, empty; if (!(bank->ring_mask & 1 << ring_id)) return 0; head = READ_CSR_RING_HEAD(csr, bank->bank_number, ring->ring_number); tail = READ_CSR_RING_TAIL(csr, bank->bank_number, ring->ring_number); empty = READ_CSR_E_STAT(csr, bank->bank_number); seq_printf(sfile, "ring num %02d, head %04x, tail %04x, empty: %d\n", ring->ring_number, head, tail, (empty & 1 << ring->ring_number) >> ring->ring_number); } return 0; } static void adf_bank_stop(struct seq_file *sfile, void *v) { mutex_unlock(&bank_read_lock); } static const struct seq_operations adf_bank_sops = { .start = adf_bank_start, .next = adf_bank_next, .stop = adf_bank_stop, .show = adf_bank_show }; static int adf_bank_open(struct inode *inode, struct file *file) { int ret = seq_open(file, &adf_bank_sops); if (!ret) { struct seq_file *seq_f = file->private_data; seq_f->private = inode->i_private; } return ret; } static const struct file_operations adf_bank_debug_fops = { .open = adf_bank_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release }; int adf_bank_debugfs_add(struct adf_etr_bank_data *bank) { struct adf_accel_dev *accel_dev = bank->accel_dev; struct dentry *parent = accel_dev->transport->debug; char name[8]; snprintf(name, sizeof(name), "bank_%02d", bank->bank_number); bank->bank_debug_dir = debugfs_create_dir(name, parent); if (!bank->bank_debug_dir) { pr_err("QAT: Failed to create bank debug dir.\n"); return -EFAULT; } bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR, bank->bank_debug_dir, bank, &adf_bank_debug_fops); if (!bank->bank_debug_cfg) { pr_err("QAT: Failed to create bank debug entry.\n"); debugfs_remove(bank->bank_debug_dir); return -EFAULT; } return 0; } void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank) { debugfs_remove(bank->bank_debug_cfg); debugfs_remove(bank->bank_debug_dir); }
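The ring and bank debugfs files above both follow the kernel's seq_file iterator protocol: start() takes the read lock and returns SEQ_START_TOKEN for the header row, next() advances the position, show() prints one record, and stop() releases the lock. A stripped-down sketch of the same protocol over a hypothetical static table (the names and data below are illustrative, not taken from the QAT driver) looks like this:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>

static int demo_table[] = { 3, 1, 4, 1, 5, 9 };
static DEFINE_MUTEX(demo_lock);

static void *demo_start(struct seq_file *sfile, loff_t *pos)
{
	mutex_lock(&demo_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;			/* header line */
	if (*pos > ARRAY_SIZE(demo_table))
		return NULL;				/* iteration finished */
	return &demo_table[*pos - 1];
}

static void *demo_next(struct seq_file *sfile, void *v, loff_t *pos)
{
	if (++(*pos) > ARRAY_SIZE(demo_table))
		return NULL;
	return &demo_table[*pos - 1];
}

static int demo_show(struct seq_file *sfile, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(sfile, "---- demo table ----\n");
	else
		seq_printf(sfile, "%d\n", *(int *)v);
	return 0;
}

static void demo_stop(struct seq_file *sfile, void *v)
{
	mutex_unlock(&demo_lock);
}

static const struct seq_operations demo_sops = {
	.start = demo_start,
	.next  = demo_next,
	.stop  = demo_stop,
	.show  = demo_show,
};

Wiring this up is then just a matter of calling seq_open() with demo_sops from a file_operations .open handler and reusing seq_read/seq_lseek/seq_release, exactly as adf_ring_open() and adf_bank_open() do above.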
null
null
null
null
106,732
11,052
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
176,047
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/*
 * mbus.h: Various defines for MBUS modules.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC_MBUS_H
#define _SPARC_MBUS_H

#include <asm/ross.h>    /* HyperSparc stuff */
#include <asm/viking.h>  /* Ugh, bug city... */

enum mbus_module {
	HyperSparc        = 0,
	Swift_ok          = 4,
	Swift_bad_c       = 5,
	Swift_lots_o_bugs = 6,
	Tsunami           = 7,
	Viking_12         = 8,
	Viking_2x         = 9,
	Viking_30         = 10,
	Viking_35         = 11,
	Viking_new        = 12,
	TurboSparc        = 13,
	SRMMU_INVAL_MOD   = 14,
};

extern enum mbus_module srmmu_modtype;
extern unsigned int viking_rev, swift_rev, cypress_rev;

/* HW Mbus module bugs we have to deal with */
#define HWBUG_COPYBACK_BROKEN        0x00000001
#define HWBUG_ASIFLUSH_BROKEN        0x00000002
#define HWBUG_VACFLUSH_BITROT        0x00000004
#define HWBUG_KERN_ACCBROKEN         0x00000008
#define HWBUG_KERN_CBITBROKEN        0x00000010
#define HWBUG_MODIFIED_BITROT        0x00000020
#define HWBUG_PC_BADFAULT_ADDR       0x00000040
#define HWBUG_SUPERSCALAR_BAD        0x00000080
#define HWBUG_PACINIT_BITROT         0x00000100

/* First the module type values. To find out which you have, just load
 * the mmu control register from ASI_M_MMUREG alternate address space and
 * shift the value right 28 bits.
 */
/* IMPL field means the company which produced the chip. */
#define MBUS_VIKING        0x4   /* bleech, Texas Instruments Module */
#define MBUS_LSI           0x3   /* LSI Logics */
#define MBUS_ROSS          0x1   /* Ross is nice */
#define MBUS_FMI           0x0   /* Fujitsu Microelectronics/Swift */

/* Ross Module versions */
#define ROSS_604_REV_CDE   0x0   /* revisions c, d, and e */
#define ROSS_604_REV_F     0x1   /* revision f */
#define ROSS_605           0xf   /* revision a, a.1, and a.2 */
#define ROSS_605_REV_B     0xe   /* revision b */

/* TI Viking Module versions */
#define VIKING_REV_12      0x1   /* Version 1.2 or SPARCclassic's CPU */
#define VIKING_REV_2       0x2   /* Version 2.1, 2.2, 2.3, and 2.4 */
#define VIKING_REV_30      0x3   /* Version 3.0 */
#define VIKING_REV_35      0x4   /* Version 3.5 */

/* LSI Logics. */
#define LSI_L64815         0x0

/* Fujitsu */
#define FMI_AURORA         0x4   /* MB8690x, a Swift module... */
#define FMI_TURBO          0x5   /* MB86907, a TurboSparc module... */

/* For multiprocessor support we need to be able to obtain the CPU id and
 * the MBUS Module id.
 */

/* The CPU ID is encoded in the trap base register, 20 bits to the left of
 * bit zero, with 2 bits being significant.
 */
#define TBR_ID_SHIFT       20

static inline int get_cpuid(void)
{
	register int retval;
	__asm__ __volatile__("rd %%tbr, %0\n\t"
			     "srl %0, %1, %0\n\t" :
			     "=r" (retval) :
			     "i" (TBR_ID_SHIFT));

	return (retval & 3);
}

static inline int get_modid(void)
{
	return (get_cpuid() | 0x8);
}

#endif /* !(_SPARC_MBUS_H) */
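On these SPARC MBUS systems the CPU number comes straight out of the %tbr register and the MBUS module ID is simply that number with bit 3 set, which is all the two inline helpers above do. A hypothetical caller (the function name is only illustrative) might use them like this:

#include <asm/mbus.h>
#include <linux/printk.h>

/* Illustrative only: report the current CPU and its MBUS module ID. */
static void demo_report_cpu(void)
{
	int cpu = get_cpuid();	/* 0..3, taken from bits 21:20 of %tbr */
	int mid = get_modid();	/* same value with bit 3 set: 0x8..0xb */

	pr_info("running on cpu %d, mbus module id 0x%x\n", cpu, mid);
}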
null
null
null
null
84,394
38,951
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
203,946
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * zbud.c * * Copyright (C) 2013, Seth Jennings, IBM * * Concepts based on zcache internal zbud allocator by Dan Magenheimer. * * zbud is an special purpose allocator for storing compressed pages. Contrary * to what its name may suggest, zbud is not a buddy allocator, but rather an * allocator that "buddies" two compressed pages together in a single memory * page. * * While this design limits storage density, it has simple and deterministic * reclaim properties that make it preferable to a higher density approach when * reclaim will be used. * * zbud works by storing compressed pages, or "zpages", together in pairs in a * single memory page called a "zbud page". The first buddy is "left * justified" at the beginning of the zbud page, and the last buddy is "right * justified" at the end of the zbud page. The benefit is that if either * buddy is freed, the freed buddy space, coalesced with whatever slack space * that existed between the buddies, results in the largest possible free region * within the zbud page. * * zbud also provides an attractive lower bound on density. The ratio of zpages * to zbud pages can not be less than 1. This ensures that zbud can never "do * harm" by using more pages to store zpages than the uncompressed zpages would * have used on their own. * * zbud pages are divided into "chunks". The size of the chunks is fixed at * compile time and determined by NCHUNKS_ORDER below. Dividing zbud pages * into chunks allows organizing unbuddied zbud pages into a manageable number * of unbuddied lists according to the number of free chunks available in the * zbud page. * * The zbud API differs from that of conventional allocators in that the * allocation function, zbud_alloc(), returns an opaque handle to the user, * not a dereferenceable pointer. The user must map the handle using * zbud_map() in order to get a usable pointer by which to access the * allocation data and unmap the handle with zbud_unmap() when operations * on the allocation data are complete. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/atomic.h> #include <linux/list.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/preempt.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/zbud.h> #include <linux/zpool.h> /***************** * Structures *****************/ /* * NCHUNKS_ORDER determines the internal allocation granularity, effectively * adjusting internal fragmentation. It also determines the number of * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk * in allocated page is occupied by zbud header, NCHUNKS will be calculated to * 63 which shows the max number of free chunks in zbud page, also there will be * 63 freelists per pool. */ #define NCHUNKS_ORDER 6 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER) #define CHUNK_SIZE (1 << CHUNK_SHIFT) #define ZHDR_SIZE_ALIGNED CHUNK_SIZE #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT) /** * struct zbud_pool - stores metadata for each zbud pool * @lock: protects all pool fields and first|last_chunk fields of any * zbud page in the pool * @unbuddied: array of lists tracking zbud pages that only contain one buddy; * the lists each zbud page is added to depends on the size of * its free region. * @buddied: list tracking the zbud pages that contain two buddies; * these zbud pages are full * @lru: list tracking the zbud pages in LRU order by most recently * added buddy. 
* @pages_nr: number of zbud pages in the pool. * @ops: pointer to a structure of user defined operations specified at * pool creation time. * * This structure is allocated at pool creation time and maintains metadata * pertaining to a particular zbud pool. */ struct zbud_pool { spinlock_t lock; struct list_head unbuddied[NCHUNKS]; struct list_head buddied; struct list_head lru; u64 pages_nr; const struct zbud_ops *ops; #ifdef CONFIG_ZPOOL struct zpool *zpool; const struct zpool_ops *zpool_ops; #endif }; /* * struct zbud_header - zbud page metadata occupying the first chunk of each * zbud page. * @buddy: links the zbud page into the unbuddied/buddied lists in the pool * @lru: links the zbud page into the lru list in the pool * @first_chunks: the size of the first buddy in chunks, 0 if free * @last_chunks: the size of the last buddy in chunks, 0 if free */ struct zbud_header { struct list_head buddy; struct list_head lru; unsigned int first_chunks; unsigned int last_chunks; bool under_reclaim; }; /***************** * zpool ****************/ #ifdef CONFIG_ZPOOL static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle) { if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict) return pool->zpool_ops->evict(pool->zpool, handle); else return -ENOENT; } static const struct zbud_ops zbud_zpool_ops = { .evict = zbud_zpool_evict }; static void *zbud_zpool_create(const char *name, gfp_t gfp, const struct zpool_ops *zpool_ops, struct zpool *zpool) { struct zbud_pool *pool; pool = zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL); if (pool) { pool->zpool = zpool; pool->zpool_ops = zpool_ops; } return pool; } static void zbud_zpool_destroy(void *pool) { zbud_destroy_pool(pool); } static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp, unsigned long *handle) { return zbud_alloc(pool, size, gfp, handle); } static void zbud_zpool_free(void *pool, unsigned long handle) { zbud_free(pool, handle); } static int zbud_zpool_shrink(void *pool, unsigned int pages, unsigned int *reclaimed) { unsigned int total = 0; int ret = -EINVAL; while (total < pages) { ret = zbud_reclaim_page(pool, 8); if (ret < 0) break; total++; } if (reclaimed) *reclaimed = total; return ret; } static void *zbud_zpool_map(void *pool, unsigned long handle, enum zpool_mapmode mm) { return zbud_map(pool, handle); } static void zbud_zpool_unmap(void *pool, unsigned long handle) { zbud_unmap(pool, handle); } static u64 zbud_zpool_total_size(void *pool) { return zbud_get_pool_size(pool) * PAGE_SIZE; } static struct zpool_driver zbud_zpool_driver = { .type = "zbud", .owner = THIS_MODULE, .create = zbud_zpool_create, .destroy = zbud_zpool_destroy, .malloc = zbud_zpool_malloc, .free = zbud_zpool_free, .shrink = zbud_zpool_shrink, .map = zbud_zpool_map, .unmap = zbud_zpool_unmap, .total_size = zbud_zpool_total_size, }; MODULE_ALIAS("zpool-zbud"); #endif /* CONFIG_ZPOOL */ /***************** * Helpers *****************/ /* Just to make the code easier to read */ enum buddy { FIRST, LAST }; /* Converts an allocation size in bytes to size in zbud chunks */ static int size_to_chunks(size_t size) { return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT; } #define for_each_unbuddied_list(_iter, _begin) \ for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++) /* Initializes the zbud header of a newly allocated zbud page */ static struct zbud_header *init_zbud_page(struct page *page) { struct zbud_header *zhdr = page_address(page); zhdr->first_chunks = 0; zhdr->last_chunks = 0; INIT_LIST_HEAD(&zhdr->buddy); 
INIT_LIST_HEAD(&zhdr->lru); zhdr->under_reclaim = 0; return zhdr; } /* Resets the struct page fields and frees the page */ static void free_zbud_page(struct zbud_header *zhdr) { __free_page(virt_to_page(zhdr)); } /* * Encodes the handle of a particular buddy within a zbud page * Pool lock should be held as this function accesses first|last_chunks */ static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud) { unsigned long handle; /* * For now, the encoded handle is actually just the pointer to the data * but this might not always be the case. A little information hiding. * Add CHUNK_SIZE to the handle if it is the first allocation to jump * over the zbud header in the first chunk. */ handle = (unsigned long)zhdr; if (bud == FIRST) /* skip over zbud header */ handle += ZHDR_SIZE_ALIGNED; else /* bud == LAST */ handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT); return handle; } /* Returns the zbud page where a given handle is stored */ static struct zbud_header *handle_to_zbud_header(unsigned long handle) { return (struct zbud_header *)(handle & PAGE_MASK); } /* Returns the number of free chunks in a zbud page */ static int num_free_chunks(struct zbud_header *zhdr) { /* * Rather than branch for different situations, just use the fact that * free buddies have a length of zero to simplify everything. */ return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks; } /***************** * API Functions *****************/ /** * zbud_create_pool() - create a new zbud pool * @gfp: gfp flags when allocating the zbud pool structure * @ops: user-defined operations for the zbud pool * * Return: pointer to the new zbud pool or NULL if the metadata allocation * failed. */ struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops) { struct zbud_pool *pool; int i; pool = kzalloc(sizeof(struct zbud_pool), gfp); if (!pool) return NULL; spin_lock_init(&pool->lock); for_each_unbuddied_list(i, 0) INIT_LIST_HEAD(&pool->unbuddied[i]); INIT_LIST_HEAD(&pool->buddied); INIT_LIST_HEAD(&pool->lru); pool->pages_nr = 0; pool->ops = ops; return pool; } /** * zbud_destroy_pool() - destroys an existing zbud pool * @pool: the zbud pool to be destroyed * * The pool should be emptied before this function is called. */ void zbud_destroy_pool(struct zbud_pool *pool) { kfree(pool); } /** * zbud_alloc() - allocates a region of a given size * @pool: zbud pool from which to allocate * @size: size in bytes of the desired allocation * @gfp: gfp flags used if the pool needs to grow * @handle: handle of the new allocation * * This function will attempt to find a free region in the pool large enough to * satisfy the allocation request. A search of the unbuddied lists is * performed first. If no suitable free region is found, then a new page is * allocated and added to the pool to satisfy the request. * * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used * as zbud pool pages. * * Return: 0 if success and handle is set, otherwise -EINVAL if the size or * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate * a new page. */ int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, unsigned long *handle) { int chunks, i, freechunks; struct zbud_header *zhdr = NULL; enum buddy bud; struct page *page; if (!size || (gfp & __GFP_HIGHMEM)) return -EINVAL; if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE) return -ENOSPC; chunks = size_to_chunks(size); spin_lock(&pool->lock); /* First, try to find an unbuddied zbud page. 
*/ zhdr = NULL; for_each_unbuddied_list(i, chunks) { if (!list_empty(&pool->unbuddied[i])) { zhdr = list_first_entry(&pool->unbuddied[i], struct zbud_header, buddy); list_del(&zhdr->buddy); if (zhdr->first_chunks == 0) bud = FIRST; else bud = LAST; goto found; } } /* Couldn't find unbuddied zbud page, create new one */ spin_unlock(&pool->lock); page = alloc_page(gfp); if (!page) return -ENOMEM; spin_lock(&pool->lock); pool->pages_nr++; zhdr = init_zbud_page(page); bud = FIRST; found: if (bud == FIRST) zhdr->first_chunks = chunks; else zhdr->last_chunks = chunks; if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) { /* Add to unbuddied list */ freechunks = num_free_chunks(zhdr); list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); } else { /* Add to buddied list */ list_add(&zhdr->buddy, &pool->buddied); } /* Add/move zbud page to beginning of LRU */ if (!list_empty(&zhdr->lru)) list_del(&zhdr->lru); list_add(&zhdr->lru, &pool->lru); *handle = encode_handle(zhdr, bud); spin_unlock(&pool->lock); return 0; } /** * zbud_free() - frees the allocation associated with the given handle * @pool: pool in which the allocation resided * @handle: handle associated with the allocation returned by zbud_alloc() * * In the case that the zbud page in which the allocation resides is under * reclaim, as indicated by the PG_reclaim flag being set, this function * only sets the first|last_chunks to 0. The page is actually freed * once both buddies are evicted (see zbud_reclaim_page() below). */ void zbud_free(struct zbud_pool *pool, unsigned long handle) { struct zbud_header *zhdr; int freechunks; spin_lock(&pool->lock); zhdr = handle_to_zbud_header(handle); /* If first buddy, handle will be page aligned */ if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK) zhdr->last_chunks = 0; else zhdr->first_chunks = 0; if (zhdr->under_reclaim) { /* zbud page is under reclaim, reclaim will free */ spin_unlock(&pool->lock); return; } /* Remove from existing buddy list */ list_del(&zhdr->buddy); if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { /* zbud page is empty, free */ list_del(&zhdr->lru); free_zbud_page(zhdr); pool->pages_nr--; } else { /* Add to unbuddied list */ freechunks = num_free_chunks(zhdr); list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); } spin_unlock(&pool->lock); } /** * zbud_reclaim_page() - evicts allocations from a pool page and frees it * @pool: pool from which a page will attempt to be evicted * @retires: number of pages on the LRU list for which eviction will * be attempted before failing * * zbud reclaim is different from normal system reclaim in that the reclaim is * done from the bottom, up. This is because only the bottom layer, zbud, has * information on how the allocations are organized within each zbud page. This * has the potential to create interesting locking situations between zbud and * the user, however. * * To avoid these, this is how zbud_reclaim_page() should be called: * The user detects a page should be reclaimed and calls zbud_reclaim_page(). * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call * the user-defined eviction handler with the pool and handle as arguments. * * If the handle can not be evicted, the eviction handler should return * non-zero. zbud_reclaim_page() will add the zbud page back to the * appropriate list and try the next zbud page on the LRU up to * a user defined number of retries. * * If the handle is successfully evicted, the eviction handler should * return 0 _and_ should have called zbud_free() on the handle. 
zbud_free() * contains logic to delay freeing the page if the page is under reclaim, * as indicated by the setting of the PG_reclaim flag on the underlying page. * * If all buddies in the zbud page are successfully evicted, then the * zbud page can be freed. * * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are * no pages to evict or an eviction handler is not registered, -EAGAIN if * the retry limit was hit. */ int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries) { int i, ret, freechunks; struct zbud_header *zhdr; unsigned long first_handle = 0, last_handle = 0; spin_lock(&pool->lock); if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) || retries == 0) { spin_unlock(&pool->lock); return -EINVAL; } for (i = 0; i < retries; i++) { zhdr = list_last_entry(&pool->lru, struct zbud_header, lru); list_del(&zhdr->lru); list_del(&zhdr->buddy); /* Protect zbud page against free */ zhdr->under_reclaim = true; /* * We need encode the handles before unlocking, since we can * race with free that will set (first|last)_chunks to 0 */ first_handle = 0; last_handle = 0; if (zhdr->first_chunks) first_handle = encode_handle(zhdr, FIRST); if (zhdr->last_chunks) last_handle = encode_handle(zhdr, LAST); spin_unlock(&pool->lock); /* Issue the eviction callback(s) */ if (first_handle) { ret = pool->ops->evict(pool, first_handle); if (ret) goto next; } if (last_handle) { ret = pool->ops->evict(pool, last_handle); if (ret) goto next; } next: spin_lock(&pool->lock); zhdr->under_reclaim = false; if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { /* * Both buddies are now free, free the zbud page and * return success. */ free_zbud_page(zhdr); pool->pages_nr--; spin_unlock(&pool->lock); return 0; } else if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) { /* add to unbuddied list */ freechunks = num_free_chunks(zhdr); list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); } else { /* add to buddied list */ list_add(&zhdr->buddy, &pool->buddied); } /* add to beginning of LRU */ list_add(&zhdr->lru, &pool->lru); } spin_unlock(&pool->lock); return -EAGAIN; } /** * zbud_map() - maps the allocation associated with the given handle * @pool: pool in which the allocation resides * @handle: handle associated with the allocation to be mapped * * While trivial for zbud, the mapping functions for others allocators * implementing this allocation API could have more complex information encoded * in the handle and could create temporary mappings to make the data * accessible to the user. * * Returns: a pointer to the mapped allocation */ void *zbud_map(struct zbud_pool *pool, unsigned long handle) { return (void *)(handle); } /** * zbud_unmap() - maps the allocation associated with the given handle * @pool: pool in which the allocation resides * @handle: handle associated with the allocation to be unmapped */ void zbud_unmap(struct zbud_pool *pool, unsigned long handle) { } /** * zbud_get_pool_size() - gets the zbud pool size in pages * @pool: pool whose size is being queried * * Returns: size in pages of the given pool. The pool lock need not be * taken to access pages_nr. 
*/ u64 zbud_get_pool_size(struct zbud_pool *pool) { return pool->pages_nr; } static int __init init_zbud(void) { /* Make sure the zbud header will fit in one chunk */ BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED); pr_info("loaded\n"); #ifdef CONFIG_ZPOOL zpool_register_driver(&zbud_zpool_driver); #endif return 0; } static void __exit exit_zbud(void) { #ifdef CONFIG_ZPOOL zpool_unregister_driver(&zbud_zpool_driver); #endif pr_info("unloaded\n"); } module_init(init_zbud); module_exit(exit_zbud); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>"); MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");
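The zbud API above is handle-based rather than pointer-based: zbud_alloc() hands back an opaque handle that has to be mapped with zbud_map() before the buddy's memory can be touched and unmapped again with zbud_unmap(). A minimal sketch of a caller, assuming a hypothetical demo_store() helper and an eviction callback that simply declines to evict (neither is part of the file above), could look like this:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/printk.h>
#include <linux/string.h>
#include <linux/zbud.h>

/* Hypothetical eviction handler: decline, so reclaim retries another page. */
static int demo_evict(struct zbud_pool *pool, unsigned long handle)
{
	return -EAGAIN;
}

static const struct zbud_ops demo_ops = {
	.evict = demo_evict,
};

static int demo_store(const void *src, size_t len)
{
	struct zbud_pool *pool;
	unsigned long handle;
	void *dst;
	int ret;

	/* gfp must not include __GFP_HIGHMEM, see zbud_alloc() above. */
	pool = zbud_create_pool(GFP_KERNEL, &demo_ops);
	if (!pool)
		return -ENOMEM;

	ret = zbud_alloc(pool, len, GFP_KERNEL, &handle);
	if (ret)
		goto out;

	/* The handle is opaque: map it to obtain a usable pointer. */
	dst = zbud_map(pool, handle);
	memcpy(dst, src, len);
	zbud_unmap(pool, handle);

	pr_info("zbud pool now holds %llu page(s)\n",
		(unsigned long long)zbud_get_pool_size(pool));

	zbud_free(pool, handle);
out:
	zbud_destroy_pool(pool);
	return ret;
}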
null
null
null
null
112,293
25,164
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
25,164
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef EXTENSIONS_COMMON_PERMISSIONS_MEDIA_GALLERIES_PERMISSION_H_
#define EXTENSIONS_COMMON_PERMISSIONS_MEDIA_GALLERIES_PERMISSION_H_

#include "extensions/common/permissions/api_permission.h"
#include "extensions/common/permissions/media_galleries_permission_data.h"
#include "extensions/common/permissions/set_disjunction_permission.h"

namespace extensions {

// Media Galleries permissions are as follows:
//   <media-galleries-permission-pattern>
//             := <access> | <access> 'allAutoDetected' | 'allAutoDetected' |
//                <access> 'scan' | 'scan'
//   <access>  := 'read' | 'read' <access> | 'read' <secondary-access>
//   <secondary-access>
//             := 'delete' | 'delete' <secondary-access> |
//                'delete' <tertiary-access>
//   <tertiary-access>
//             := 'copyTo' | 'copyTo' <tertiary-access>
// An example of a line for mediaGalleries permissions in a manifest file:
//   {"mediaGalleries": "read delete"},
// We also allow a permission without any sub-permissions:
//   "mediaGalleries",
class MediaGalleriesPermission
    : public SetDisjunctionPermission<MediaGalleriesPermissionData,
                                      MediaGalleriesPermission> {
 public:
  struct CheckParam : public APIPermission::CheckParam {
    explicit CheckParam(const std::string& permission)
        : permission(permission) {}
    const std::string permission;
  };

  explicit MediaGalleriesPermission(const APIPermissionInfo* info);
  ~MediaGalleriesPermission() override;

  // SetDisjunctionPermission overrides.
  // MediaGalleriesPermission does additional checks to make sure the
  // permissions do not contain unknown values.
  bool FromValue(const base::Value* value,
                 std::string* error,
                 std::vector<std::string>* unhandled_permissions) override;

  // APIPermission overrides.
  PermissionIDSet GetPermissions() const override;

  // Permission strings.
  static const char kAllAutoDetectedPermission[];
  static const char kScanPermission[];
  static const char kReadPermission[];
  static const char kCopyToPermission[];
  static const char kDeletePermission[];
};

}  // namespace extensions

#endif  // EXTENSIONS_COMMON_PERMISSIONS_MEDIA_GALLERIES_PERMISSION_H_
null
null
null
null
22,027
1,931
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
154,988
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/* * VC-1 and WMV3 decoder * Copyright (c) 2006-2007 Konstantin Shishkov * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_VC1_H #define AVCODEC_VC1_H #include "avcodec.h" #include "h264chroma.h" #include "mpegvideo.h" #include "intrax8.h" #include "vc1_common.h" #include "vc1dsp.h" #define AC_VLC_BITS 9 /** Sequence quantizer mode */ //@{ enum QuantMode { QUANT_FRAME_IMPLICIT, ///< Implicitly specified at frame level QUANT_FRAME_EXPLICIT, ///< Explicitly specified at frame level QUANT_NON_UNIFORM, ///< Non-uniform quant used for all frames QUANT_UNIFORM ///< Uniform quant used for all frames }; //@} /** Where quant can be changed */ //@{ enum DQProfile { DQPROFILE_FOUR_EDGES, DQPROFILE_DOUBLE_EDGES, DQPROFILE_SINGLE_EDGE, DQPROFILE_ALL_MBS }; //@} /** @name Where quant can be changed */ //@{ enum DQSingleEdge { DQSINGLE_BEDGE_LEFT, DQSINGLE_BEDGE_TOP, DQSINGLE_BEDGE_RIGHT, DQSINGLE_BEDGE_BOTTOM }; //@} /** Which pair of edges is quantized with ALTPQUANT */ //@{ enum DQDoubleEdge { DQDOUBLE_BEDGE_TOPLEFT, DQDOUBLE_BEDGE_TOPRIGHT, DQDOUBLE_BEDGE_BOTTOMRIGHT, DQDOUBLE_BEDGE_BOTTOMLEFT }; //@} /** MV modes for P-frames */ //@{ enum MVModes { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_MIXED_MV, MV_PMODE_INTENSITY_COMP }; //@} /** MBMODE for interlaced frame P-picture */ //@{ enum MBModesIntfr { MV_PMODE_INTFR_1MV, MV_PMODE_INTFR_2MV_FIELD, MV_PMODE_INTFR_2MV, MV_PMODE_INTFR_4MV_FIELD, MV_PMODE_INTFR_4MV, MV_PMODE_INTFR_INTRA, }; //@} /** @name MV types for B-frames */ //@{ enum BMVTypes { BMV_TYPE_BACKWARD, BMV_TYPE_FORWARD, BMV_TYPE_INTERPOLATED, BMV_TYPE_DIRECT }; //@} /** @name Block types for P/B-frames */ //@{ enum TransformTypes { TT_8X8, TT_8X4_BOTTOM, TT_8X4_TOP, TT_8X4, // both halves TT_4X8_RIGHT, TT_4X8_LEFT, TT_4X8, // both halves TT_4X4 }; //@} enum CodingSet { CS_HIGH_MOT_INTRA = 0, CS_HIGH_MOT_INTER, CS_LOW_MOT_INTRA, CS_LOW_MOT_INTER, CS_MID_RATE_INTRA, CS_MID_RATE_INTER, CS_HIGH_RATE_INTRA, CS_HIGH_RATE_INTER }; /** @name Overlap conditions for Advanced Profile */ //@{ enum COTypes { CONDOVER_NONE = 0, CONDOVER_ALL, CONDOVER_SELECT }; //@} /** * FCM Frame Coding Mode * @note some content might be marked interlaced * but have fcm set to 0 as well (e.g. 
HD-DVD) */ enum FrameCodingMode { PROGRESSIVE = 0, ///< in the bitstream is reported as 00b ILACE_FRAME, ///< in the bitstream is reported as 10b ILACE_FIELD ///< in the bitstream is reported as 11b }; /** * Imode types * @{ */ enum Imode { IMODE_RAW, IMODE_NORM2, IMODE_DIFF2, IMODE_NORM6, IMODE_DIFF6, IMODE_ROWSKIP, IMODE_COLSKIP }; /** @} */ //imode defines /** The VC1 Context * @todo Change size wherever another size is more efficient * Many members are only used for Advanced Profile */ typedef struct VC1Context{ MpegEncContext s; IntraX8Context x8; H264ChromaContext h264chroma; VC1DSPContext vc1dsp; int bits; /** Simple/Main Profile sequence header */ //@{ int res_sprite; ///< reserved, sprite mode int res_y411; ///< reserved, old interlaced mode int res_x8; ///< reserved int multires; ///< frame-level RESPIC syntax element present int res_fasttx; ///< reserved, always 1 int res_transtab; ///< reserved, always 0 int rangered; ///< RANGEREDFRM (range reduction) syntax element present ///< at frame level int res_rtm_flag; ///< reserved, set to 1 int reserved; ///< reserved //@} /** Advanced Profile */ //@{ int level; ///< 3 bits, for Advanced/Simple Profile, provided by TS layer int chromaformat; ///< 2 bits, 2=4:2:0, only defined int postprocflag; ///< Per-frame processing suggestion flag present int broadcast; ///< TFF/RFF present int interlace; ///< Progressive/interlaced (RPTFTM syntax element) int tfcntrflag; ///< TFCNTR present int panscanflag; ///< NUMPANSCANWIN, TOPLEFT{X,Y}, BOTRIGHT{X,Y} present int refdist_flag; ///< REFDIST syntax element present in II, IP, PI or PP field picture headers int extended_dmv; ///< Additional extended dmv range at P/B-frame-level int color_prim; ///< 8 bits, chroma coordinates of the color primaries int transfer_char; ///< 8 bits, Opto-electronic transfer characteristics int matrix_coef; ///< 8 bits, Color primaries->YCbCr transform matrix int hrd_param_flag; ///< Presence of Hypothetical Reference ///< Decoder parameters int psf; ///< Progressive Segmented Frame //@} /** Sequence header data for all Profiles * TODO: choose between ints, uint8_ts and monobit flags */ //@{ int profile; ///< 2 bits, Profile int frmrtq_postproc; ///< 3 bits, int bitrtq_postproc; ///< 5 bits, quantized framerate-based postprocessing strength int max_coded_width, max_coded_height; int fastuvmc; ///< Rounding of qpel vector to hpel ? 
(not in Simple) int extended_mv; ///< Ext MV in P/B (not in Simple) int dquant; ///< How qscale varies with MBs, 2 bits (not in Simple) int vstransform; ///< variable-size [48]x[48] transform type + info int overlap; ///< overlapped transforms in use int quantizer_mode; ///< 2 bits, quantizer mode used for sequence, see QUANT_* int finterpflag; ///< INTERPFRM present //@} /** Frame decoding info for all profiles */ //@{ uint8_t mv_mode; ///< MV coding mode uint8_t mv_mode2; ///< Secondary MV coding mode (B-frames) int k_x; ///< Number of bits for MVs (depends on MV range) int k_y; ///< Number of bits for MVs (depends on MV range) int range_x, range_y; ///< MV range uint8_t pq, altpq; ///< Current/alternate frame quantizer scale uint8_t zz_8x8[4][64]; ///< Zigzag table for TT_8x8, permuted for IDCT int left_blk_sh, top_blk_sh; ///< Either 3 or 0, positions of l/t in blk[] const uint8_t* zz_8x4; ///< Zigzag scan table for TT_8x4 coding mode const uint8_t* zz_4x8; ///< Zigzag scan table for TT_4x8 coding mode /** pquant parameters */ //@{ uint8_t dquantfrm; uint8_t dqprofile; uint8_t dqsbedge; uint8_t dqbilevel; //@} /** AC coding set indexes * @see 8.1.1.10, p(1)10 */ //@{ int c_ac_table_index; ///< Chroma index from ACFRM element int y_ac_table_index; ///< Luma index from AC2FRM element //@} int ttfrm; ///< Transform type info present at frame level uint8_t ttmbf; ///< Transform type flag int *ttblk_base, *ttblk; ///< Transform type at the block level int codingset; ///< index of current table set from 11.8 to use for luma block decoding int codingset2; ///< index of current table set from 11.8 to use for chroma block decoding int pqindex; ///< raw pqindex used in coding set selection int a_avail, c_avail; uint8_t *mb_type_base, *mb_type[3]; /** Luma compensation parameters */ //@{ uint8_t lumscale; uint8_t lumshift; //@} int16_t bfraction; ///< Relative position % anchors=> how to scale MVs uint8_t halfpq; ///< Uniform quant over image and qp+.5 uint8_t respic; ///< Frame-level flag for resized images int buffer_fullness; ///< HRD info /** Ranges: * -# 0 -> [-64n 63.f] x [-32, 31.f] * -# 1 -> [-128, 127.f] x [-64, 63.f] * -# 2 -> [-512, 511.f] x [-128, 127.f] * -# 3 -> [-1024, 1023.f] x [-256, 255.f] */ uint8_t mvrange; ///< Extended MV range flag uint8_t pquantizer; ///< Uniform (over sequence) quantizer in use VLC *cbpcy_vlc; ///< CBPCY VLC table int tt_index; ///< Index for Transform Type tables (to decode TTMB) uint8_t* mv_type_mb_plane; ///< bitplane for mv_type == (4MV) uint8_t* direct_mb_plane; ///< bitplane for "direct" MBs uint8_t* forward_mb_plane; ///< bitplane for "forward" MBs int mv_type_is_raw; ///< mv type mb plane is not coded int dmb_is_raw; ///< direct mb plane is raw int fmb_is_raw; ///< forward mb plane is raw int skip_is_raw; ///< skip mb plane is not coded uint8_t last_luty[2][256], last_lutuv[2][256]; ///< lookup tables used for intensity compensation uint8_t aux_luty[2][256], aux_lutuv[2][256]; ///< lookup tables used for intensity compensation uint8_t next_luty[2][256], next_lutuv[2][256]; ///< lookup tables used for intensity compensation uint8_t (*curr_luty)[256] ,(*curr_lutuv)[256]; int last_use_ic, *curr_use_ic, next_use_ic, aux_use_ic; int rnd; ///< rounding control int cbptab; /** Frame decoding info for S/M profiles only */ //@{ uint8_t rangeredfrm; ///< out_sample = CLIP((in_sample-128)*2+128) uint8_t interpfrm; //@} /** Frame decoding info for Advanced profile */ //@{ enum FrameCodingMode fcm; uint8_t numpanscanwin; uint8_t tfcntr; uint8_t rptfrm, tff, 
rff; uint16_t topleftx; uint16_t toplefty; uint16_t bottomrightx; uint16_t bottomrighty; uint8_t uvsamp; uint8_t postproc; int hrd_num_leaky_buckets; uint8_t bit_rate_exponent; uint8_t buffer_size_exponent; uint8_t* acpred_plane; ///< AC prediction flags bitplane int acpred_is_raw; uint8_t* over_flags_plane; ///< Overflags bitplane int overflg_is_raw; uint8_t condover; uint16_t *hrd_rate, *hrd_buffer; uint8_t *hrd_fullness; uint8_t range_mapy_flag; uint8_t range_mapuv_flag; uint8_t range_mapy; uint8_t range_mapuv; //@} /** Frame decoding info for interlaced picture */ uint8_t dmvrange; ///< Extended differential MV range flag int fourmvswitch; int intcomp; uint8_t lumscale2; ///< for interlaced field P picture uint8_t lumshift2; VLC* mbmode_vlc; VLC* imv_vlc; VLC* twomvbp_vlc; VLC* fourmvbp_vlc; uint8_t twomvbp; uint8_t fourmvbp; uint8_t* fieldtx_plane; int fieldtx_is_raw; uint8_t zzi_8x8[64]; uint8_t *blk_mv_type_base, *blk_mv_type; ///< 0: frame MV, 1: field MV (interlaced frame) uint8_t *mv_f_base, *mv_f[2]; ///< 0: MV obtained from same field, 1: opposite field uint8_t *mv_f_next_base, *mv_f_next[2]; int field_mode; ///< 1 for interlaced field pictures int fptype; int second_field; int refdist; ///< distance of the current picture from reference int numref; ///< number of past field pictures used as reference // 0 corresponds to 1 and 1 corresponds to 2 references int reffield; ///< if numref = 0 (1 reference) then reffield decides which // field to use among the two fields from previous frame int intcompfield; ///< which of the two fields to be intensity compensated // 0: both fields, 1: bottom field, 2: top field int cur_field_type; ///< 0: top, 1: bottom int ref_field_type[2]; ///< forward and backward reference field type (top or bottom) int blocks_off, mb_off; int qs_last; ///< if qpel has been used in the previous (tr.) 
picture int bmvtype; int frfd, brfd; ///< reference frame distance (forward or backward) int first_pic_header_flag; int pic_header_flag; int mbmodetab; int icbptab; int imvtab; int twomvbptab; int fourmvbptab; /** Frame decoding info for sprite modes */ //@{ int new_sprite; int two_sprites; AVFrame *sprite_output_frame; int output_width, output_height, sprite_width, sprite_height; uint8_t* sr_rows[2][2]; ///< Sprite resizer line cache //@} int p_frame_skipped; int bi_type; int x8_type; int16_t (*block)[6][64]; int n_allocated_blks, cur_blk_idx, left_blk_idx, topleft_blk_idx, top_blk_idx; uint32_t *cbp_base, *cbp; uint8_t *is_intra_base, *is_intra; int16_t (*luma_mv_base)[2], (*luma_mv)[2]; uint8_t bfraction_lut_index; ///< Index for BFRACTION value (see Table 40, reproduced into ff_vc1_bfraction_lut[]) uint8_t broken_link; ///< Broken link flag (BROKEN_LINK syntax element) uint8_t closed_entry; ///< Closed entry point flag (CLOSED_ENTRY syntax element) int end_mb_x; ///< Horizontal macroblock limit (used only by mss2) int parse_only; ///< Context is used within parser int resync_marker; ///< could this stream contain resync markers } VC1Context; /** * Decode Simple/Main Profiles sequence header * @see Figure 7-8, p16-17 * @param avctx Codec context * @param gb GetBit context initialized from Codec context extra_data * @return Status */ int ff_vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb); int ff_vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb); int ff_vc1_parse_frame_header (VC1Context *v, GetBitContext *gb); int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext *gb); int ff_vc1_init_common(VC1Context *v); int ff_vc1_decode_init_alloc_tables(VC1Context *v); void ff_vc1_init_transposed_scantables(VC1Context *v); int ff_vc1_decode_end(AVCodecContext *avctx); void ff_vc1_decode_blocks(VC1Context *v); void ff_vc1_loop_filter_iblk(VC1Context *v, int pq); void ff_vc1_i_overlap_filter(VC1Context *v); void ff_vc1_p_overlap_filter(VC1Context *v); void ff_vc1_i_loop_filter(VC1Context *v); void ff_vc1_p_loop_filter(VC1Context *v); void ff_vc1_p_intfr_loop_filter(VC1Context *v); void ff_vc1_b_intfi_loop_filter(VC1Context *v); void ff_vc1_mc_1mv(VC1Context *v, int dir); void ff_vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg); void ff_vc1_mc_4mv_chroma(VC1Context *v, int dir); void ff_vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg); void ff_vc1_interp_mc(VC1Context *v); #endif /* AVCODEC_VC1_H */
null
null
null
null
71,043
11,886
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
11,886
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/omnibox/browser/scored_history_match.h" #include <algorithm> #include <memory> #include <utility> #include "base/auto_reset.h" #include "base/bind.h" #include "base/i18n/break_iterator.h" #include "base/strings/string16.h" #include "base/strings/utf_string_conversions.h" #include "components/omnibox/browser/omnibox_field_trial.h" #include "components/search_engines/search_terms_data.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" using base::ASCIIToUTF16; using testing::ElementsAre; using testing::Pair; namespace { // Returns a VisitInfoVector that includes |num_visits| spread over the // last |frequency|*|num_visits| days (relative to |now|). A frequency of // one means one visit each day, two means every other day, etc. VisitInfoVector CreateVisitInfoVector(int num_visits, int frequency, base::Time now) { VisitInfoVector visits; for (int i = 0; i < num_visits; ++i) { visits.push_back( std::make_pair(now - base::TimeDelta::FromDays(i * frequency), ui::PAGE_TRANSITION_LINK)); } return visits; } } // namespace class ScoredHistoryMatchTest : public testing::Test { protected: // Convenience function to create a history::URLRow with basic data for |url|, // |title|, |visit_count|, and |typed_count|. |days_since_last_visit| gives // the number of days ago to which to set the URL's last_visit. history::URLRow MakeURLRow(const char* url, const char* title, int visit_count, int days_since_last_visit, int typed_count); // Convenience function to set the word starts information from a // history::URLRow's URL and title. void PopulateWordStarts(const history::URLRow& url_row, RowWordStarts* word_starts); // Convenience functions for easily creating vectors of search terms. String16Vector Make1Term(const char* term) const; String16Vector Make2Terms(const char* term_1, const char* term_2) const; // Convenience function for GetTopicalityScore() that builds the term match // and word break information automatically that are needed to call // GetTopicalityScore(). It only works for scoring a single term, not // multiple terms. 
float GetTopicalityScoreOfTermAgainstURLAndTitle(const base::string16& term, const GURL& url, const base::string16& title); }; history::URLRow ScoredHistoryMatchTest::MakeURLRow(const char* url, const char* title, int visit_count, int days_since_last_visit, int typed_count) { history::URLRow row(GURL(url), 0); row.set_title(ASCIIToUTF16(title)); row.set_visit_count(visit_count); row.set_typed_count(typed_count); row.set_last_visit(base::Time::NowFromSystemTime() - base::TimeDelta::FromDays(days_since_last_visit)); return row; } void ScoredHistoryMatchTest::PopulateWordStarts(const history::URLRow& url_row, RowWordStarts* word_starts) { String16SetFromString16(ASCIIToUTF16(url_row.url().spec()), &word_starts->url_word_starts_); String16SetFromString16(url_row.title(), &word_starts->title_word_starts_); } String16Vector ScoredHistoryMatchTest::Make1Term(const char* term) const { String16Vector original_terms; original_terms.push_back(ASCIIToUTF16(term)); return original_terms; } String16Vector ScoredHistoryMatchTest::Make2Terms(const char* term_1, const char* term_2) const { String16Vector original_terms; original_terms.push_back(ASCIIToUTF16(term_1)); original_terms.push_back(ASCIIToUTF16(term_2)); return original_terms; } float ScoredHistoryMatchTest::GetTopicalityScoreOfTermAgainstURLAndTitle( const base::string16& term, const GURL& url, const base::string16& title) { String16Vector term_vector = {term}; WordStarts term_word_starts = {0}; base::i18n::BreakIterator iter(term, base::i18n::BreakIterator::BREAK_WORD); if (iter.Init()) { // Find the first word start. while (iter.Advance() && !iter.IsWord()) { } term_word_starts[0] = iter.prev(); } RowWordStarts row_word_starts; base::string16 url_string = base::UTF8ToUTF16(url.spec()); String16SetFromString16(url_string, &row_word_starts.url_word_starts_); String16SetFromString16(title, &row_word_starts.title_word_starts_); ScoredHistoryMatch scored_match(history::URLRow(GURL(url)), VisitInfoVector(), term, term_vector, term_word_starts, row_word_starts, false, 1, base::Time::Max()); scored_match.url_matches = MatchTermInString(term, url_string, 0); scored_match.title_matches = MatchTermInString(term, title, 0); scored_match.topicality_threshold_ = -1; return scored_match.GetTopicalityScore(1, url, base::OffsetAdjuster::Adjustments(), term_word_starts, row_word_starts); } TEST_F(ScoredHistoryMatchTest, Scoring) { // We use NowFromSystemTime() because MakeURLRow uses the same function // to calculate last visit time when building a row. base::Time now = base::Time::NowFromSystemTime(); history::URLRow row_a(MakeURLRow("http://fedcba", "abcd bcd", 3, 30, 1)); RowWordStarts word_starts_a; PopulateWordStarts(row_a, &word_starts_a); WordStarts one_word_no_offset(1, 0u); VisitInfoVector visits_a = CreateVisitInfoVector(3, 30, now); // Mark one visit as typed. visits_a[0].second = ui::PAGE_TRANSITION_TYPED; ScoredHistoryMatch scored_a(row_a, visits_a, ASCIIToUTF16("abc"), Make1Term("abc"), one_word_no_offset, word_starts_a, false, 1, now); // Test scores based on visit_count. history::URLRow row_b(MakeURLRow("http://abcdef", "abcd bcd", 10, 30, 1)); RowWordStarts word_starts_b; PopulateWordStarts(row_b, &word_starts_b); VisitInfoVector visits_b = CreateVisitInfoVector(10, 30, now); visits_b[0].second = ui::PAGE_TRANSITION_TYPED; ScoredHistoryMatch scored_b(row_b, visits_b, ASCIIToUTF16("abc"), Make1Term("abc"), one_word_no_offset, word_starts_b, false, 1, now); EXPECT_GT(scored_b.raw_score, scored_a.raw_score); // Test scores based on last_visit. 
history::URLRow row_c(MakeURLRow("http://abcdef", "abcd bcd", 3, 10, 1)); RowWordStarts word_starts_c; PopulateWordStarts(row_c, &word_starts_c); VisitInfoVector visits_c = CreateVisitInfoVector(3, 10, now); visits_c[0].second = ui::PAGE_TRANSITION_TYPED; ScoredHistoryMatch scored_c(row_c, visits_c, ASCIIToUTF16("abc"), Make1Term("abc"), one_word_no_offset, word_starts_c, false, 1, now); EXPECT_GT(scored_c.raw_score, scored_a.raw_score); // Test scores based on typed_count. history::URLRow row_d(MakeURLRow("http://abcdef", "abcd bcd", 3, 30, 3)); RowWordStarts word_starts_d; PopulateWordStarts(row_d, &word_starts_d); VisitInfoVector visits_d = CreateVisitInfoVector(3, 30, now); visits_d[0].second = ui::PAGE_TRANSITION_TYPED; visits_d[1].second = ui::PAGE_TRANSITION_TYPED; visits_d[2].second = ui::PAGE_TRANSITION_TYPED; ScoredHistoryMatch scored_d(row_d, visits_d, ASCIIToUTF16("abc"), Make1Term("abc"), one_word_no_offset, word_starts_d, false, 1, now); EXPECT_GT(scored_d.raw_score, scored_a.raw_score); // Test scores based on a terms appearing multiple times. history::URLRow row_e(MakeURLRow( "http://csi.csi.csi/csi_csi", "CSI Guide to CSI Las Vegas, CSI New York, CSI Provo", 3, 30, 3)); RowWordStarts word_starts_e; PopulateWordStarts(row_e, &word_starts_e); const VisitInfoVector visits_e = visits_d; ScoredHistoryMatch scored_e(row_e, visits_e, ASCIIToUTF16("csi"), Make1Term("csi"), one_word_no_offset, word_starts_e, false, 1, now); EXPECT_LT(scored_e.raw_score, 1400); // Test that a result with only a mid-term match (i.e., not at a word // boundary) scores 0. ScoredHistoryMatch scored_f(row_a, visits_a, ASCIIToUTF16("cd"), Make1Term("cd"), one_word_no_offset, word_starts_a, false, 1, now); EXPECT_EQ(scored_f.raw_score, 0); } TEST_F(ScoredHistoryMatchTest, ScoringBookmarks) { // We use NowFromSystemTime() because MakeURLRow uses the same function // to calculate last visit time when building a row. base::Time now = base::Time::NowFromSystemTime(); std::string url_string("http://fedcba"); const GURL url(url_string); history::URLRow row(MakeURLRow(url_string.c_str(), "abcd bcd", 8, 3, 1)); RowWordStarts word_starts; PopulateWordStarts(row, &word_starts); WordStarts one_word_no_offset(1, 0u); VisitInfoVector visits = CreateVisitInfoVector(8, 3, now); ScoredHistoryMatch scored(row, visits, ASCIIToUTF16("abc"), Make1Term("abc"), one_word_no_offset, word_starts, false, 1, now); // Now check that if URL is bookmarked then its score increases. base::AutoReset<float> reset(&ScoredHistoryMatch::bookmark_value_, 5); ScoredHistoryMatch scored_with_bookmark(row, visits, ASCIIToUTF16("abc"), Make1Term("abc"), one_word_no_offset, word_starts, true, 1, now); EXPECT_GT(scored_with_bookmark.raw_score, scored.raw_score); } TEST_F(ScoredHistoryMatchTest, ScoringTLD) { // We use NowFromSystemTime() because MakeURLRow uses the same function // to calculate last visit time when building a row. base::Time now = base::Time::NowFromSystemTime(); // By default the URL should not be returned for a query that includes "com". 
std::string url_string("http://fedcba.com/"); const GURL url(url_string); history::URLRow row(MakeURLRow(url_string.c_str(), "", 8, 3, 1)); RowWordStarts word_starts; PopulateWordStarts(row, &word_starts); WordStarts two_words_no_offsets(2, 0u); VisitInfoVector visits = CreateVisitInfoVector(8, 3, now); ScoredHistoryMatch scored(row, visits, ASCIIToUTF16("fed com"), Make2Terms("fed", "com"), two_words_no_offsets, word_starts, false, 1, now); EXPECT_EQ(0, scored.raw_score); // Now allow credit for the match in the TLD. base::AutoReset<bool> reset(&ScoredHistoryMatch::allow_tld_matches_, true); ScoredHistoryMatch scored_with_tld( row, visits, ASCIIToUTF16("fed com"), Make2Terms("fed", "com"), two_words_no_offsets, word_starts, false, 1, now); EXPECT_GT(scored_with_tld.raw_score, 0); } TEST_F(ScoredHistoryMatchTest, ScoringScheme) { // We use NowFromSystemTime() because MakeURLRow uses the same function // to calculate last visit time when building a row. base::Time now = base::Time::NowFromSystemTime(); // By default the URL should not be returned for a query that includes "http". std::string url_string("http://fedcba/"); const GURL url(url_string); history::URLRow row(MakeURLRow(url_string.c_str(), "", 8, 3, 1)); RowWordStarts word_starts; PopulateWordStarts(row, &word_starts); WordStarts two_words_no_offsets(2, 0u); VisitInfoVector visits = CreateVisitInfoVector(8, 3, now); ScoredHistoryMatch scored(row, visits, ASCIIToUTF16("fed http"), Make2Terms("fed", "http"), two_words_no_offsets, word_starts, false, 1, now); EXPECT_EQ(0, scored.raw_score); // Now allow credit for the match in the scheme. base::AutoReset<bool> reset(&ScoredHistoryMatch::allow_scheme_matches_, true); ScoredHistoryMatch scored_with_scheme( row, visits, ASCIIToUTF16("fed http"), Make2Terms("fed", "http"), two_words_no_offsets, word_starts, false, 1, now); EXPECT_GT(scored_with_scheme.raw_score, 0); } TEST_F(ScoredHistoryMatchTest, MatchURLComponents) { // We use NowFromSystemTime() because MakeURLRow uses the same function // to calculate last visit time when building a row. 
base::Time now = base::Time::NowFromSystemTime(); RowWordStarts word_starts; WordStarts one_word_no_offset(1, 0u); VisitInfoVector visits; { history::URLRow row( MakeURLRow("http://www.google.com", "abcdef", 3, 30, 1)); PopulateWordStarts(row, &word_starts); ScoredHistoryMatch scored_a(row, visits, ASCIIToUTF16("g"), Make1Term("g"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_a.match_in_scheme); EXPECT_FALSE(scored_a.match_in_subdomain); EXPECT_FALSE(scored_a.match_after_host); ScoredHistoryMatch scored_b(row, visits, ASCIIToUTF16("w"), Make1Term("w"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_b.match_in_scheme); EXPECT_TRUE(scored_b.match_in_subdomain); EXPECT_FALSE(scored_b.match_after_host); ScoredHistoryMatch scored_c(row, visits, ASCIIToUTF16("h"), Make1Term("h"), one_word_no_offset, word_starts, false, 1, now); EXPECT_TRUE(scored_c.match_in_scheme); EXPECT_FALSE(scored_c.match_in_subdomain); EXPECT_FALSE(scored_c.match_after_host); ScoredHistoryMatch scored_d(row, visits, ASCIIToUTF16("o"), Make1Term("o"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_d.match_in_scheme); EXPECT_FALSE(scored_d.match_in_subdomain); EXPECT_FALSE(scored_d.match_after_host); } { history::URLRow row(MakeURLRow("http://teams.foo.com", "abcdef", 3, 30, 1)); PopulateWordStarts(row, &word_starts); ScoredHistoryMatch scored_a(row, visits, ASCIIToUTF16("t"), Make1Term("t"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_a.match_in_scheme); EXPECT_TRUE(scored_a.match_in_subdomain); EXPECT_FALSE(scored_a.match_after_host); ScoredHistoryMatch scored_b(row, visits, ASCIIToUTF16("f"), Make1Term("f"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_b.match_in_scheme); EXPECT_FALSE(scored_b.match_in_subdomain); EXPECT_FALSE(scored_b.match_after_host); ScoredHistoryMatch scored_c(row, visits, ASCIIToUTF16("o"), Make1Term("o"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_c.match_in_scheme); EXPECT_FALSE(scored_c.match_in_subdomain); EXPECT_FALSE(scored_c.match_after_host); } { history::URLRow row(MakeURLRow("http://en.m.foo.com", "abcdef", 3, 30, 1)); PopulateWordStarts(row, &word_starts); ScoredHistoryMatch scored_a(row, visits, ASCIIToUTF16("e"), Make1Term("e"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_a.match_in_scheme); EXPECT_TRUE(scored_a.match_in_subdomain); EXPECT_FALSE(scored_a.match_after_host); ScoredHistoryMatch scored_b(row, visits, ASCIIToUTF16("m"), Make1Term("m"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_b.match_in_scheme); EXPECT_TRUE(scored_b.match_in_subdomain); EXPECT_FALSE(scored_b.match_after_host); ScoredHistoryMatch scored_c(row, visits, ASCIIToUTF16("f"), Make1Term("f"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_c.match_in_scheme); EXPECT_FALSE(scored_c.match_in_subdomain); EXPECT_FALSE(scored_c.match_after_host); } { history::URLRow row( MakeURLRow("https://www.testing.com/xxx?yyy#zzz", "abcdef", 3, 30, 1)); PopulateWordStarts(row, &word_starts); ScoredHistoryMatch scored_a(row, visits, ASCIIToUTF16("t"), Make1Term("t"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_a.match_in_scheme); EXPECT_FALSE(scored_a.match_in_subdomain); EXPECT_FALSE(scored_a.match_after_host); ScoredHistoryMatch scored_b(row, visits, ASCIIToUTF16("h"), Make1Term("h"), one_word_no_offset, word_starts, false, 1, now); EXPECT_TRUE(scored_b.match_in_scheme); 
EXPECT_FALSE(scored_b.match_in_subdomain); EXPECT_FALSE(scored_b.match_after_host); ScoredHistoryMatch scored_c(row, visits, ASCIIToUTF16("w"), Make1Term("w"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_c.match_in_scheme); EXPECT_TRUE(scored_c.match_in_subdomain); EXPECT_FALSE(scored_c.match_after_host); ScoredHistoryMatch scored_d(row, visits, ASCIIToUTF16("x"), Make1Term("x"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_d.match_in_scheme); EXPECT_FALSE(scored_d.match_in_subdomain); EXPECT_TRUE(scored_d.match_after_host); ScoredHistoryMatch scored_e(row, visits, ASCIIToUTF16("y"), Make1Term("y"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_e.match_in_scheme); EXPECT_FALSE(scored_e.match_in_subdomain); EXPECT_TRUE(scored_e.match_after_host); ScoredHistoryMatch scored_f(row, visits, ASCIIToUTF16("z"), Make1Term("z"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_f.match_in_scheme); EXPECT_FALSE(scored_f.match_in_subdomain); EXPECT_TRUE(scored_f.match_after_host); ScoredHistoryMatch scored_g(row, visits, ASCIIToUTF16("https://www"), Make1Term("https://www"), one_word_no_offset, word_starts, false, 1, now); EXPECT_TRUE(scored_g.match_in_scheme); EXPECT_TRUE(scored_g.match_in_subdomain); EXPECT_FALSE(scored_g.match_after_host); ScoredHistoryMatch scored_h(row, visits, ASCIIToUTF16("testing.com/x"), Make1Term("testing.com/x"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_h.match_in_scheme); EXPECT_FALSE(scored_h.match_in_subdomain); EXPECT_TRUE(scored_h.match_after_host); ScoredHistoryMatch scored_i(row, visits, ASCIIToUTF16("https://www.testing.com/x"), Make1Term("https://www.testing.com/x"), one_word_no_offset, word_starts, false, 1, now); EXPECT_TRUE(scored_i.match_in_scheme); EXPECT_TRUE(scored_i.match_in_subdomain); EXPECT_TRUE(scored_i.match_after_host); } { history::URLRow row( MakeURLRow("http://www.xn--1lq90ic7f1rc.cn/xnblah", "abcd", 3, 30, 1)); PopulateWordStarts(row, &word_starts); ScoredHistoryMatch scored_a(row, visits, ASCIIToUTF16("x"), Make1Term("x"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_a.match_in_scheme); EXPECT_FALSE(scored_a.match_in_subdomain); EXPECT_FALSE(scored_a.match_after_host); ScoredHistoryMatch scored_b(row, visits, ASCIIToUTF16("xn"), Make1Term("xn"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_b.match_in_scheme); EXPECT_FALSE(scored_b.match_in_subdomain); EXPECT_FALSE(scored_b.match_after_host); ScoredHistoryMatch scored_c(row, visits, ASCIIToUTF16("w"), Make1Term("w"), one_word_no_offset, word_starts, false, 1, now); EXPECT_FALSE(scored_c.match_in_scheme); EXPECT_TRUE(scored_c.match_in_subdomain); EXPECT_FALSE(scored_c.match_after_host); } } TEST_F(ScoredHistoryMatchTest, GetTopicalityScoreTrailingSlash) { const float hostname = GetTopicalityScoreOfTermAgainstURLAndTitle( ASCIIToUTF16("def"), GURL("http://abc.def.com/"), ASCIIToUTF16("Non-Matching Title")); const float hostname_no_slash = GetTopicalityScoreOfTermAgainstURLAndTitle( ASCIIToUTF16("def"), GURL("http://abc.def.com"), ASCIIToUTF16("Non-Matching Title")); EXPECT_EQ(hostname_no_slash, hostname); } TEST_F(ScoredHistoryMatchTest, FilterMatches) { // For ease in interpreting this test, imagine the URL // http://test.com/default/foo.aspxhome/hello.html. // 012345678901234567890123456789012345678901234567 // 1 2 3 4 // We test how FilterTermMatchesByWordStarts() reacts to various // one-character inputs. 
WordStarts terms_to_word_starts_offsets; terms_to_word_starts_offsets.push_back(0); WordStarts word_starts; word_starts.push_back(0); word_starts.push_back(7); word_starts.push_back(12); word_starts.push_back(16); word_starts.push_back(24); word_starts.push_back(28); word_starts.push_back(37); word_starts.push_back(43); // Check that "h" matches "http", "hello", and "html" but not "aspxhome" when // asked to filter non-word-start matches after the hostname. The "15" in // the filter call below is the position of the "/" ending the hostname. TermMatches term_matches; term_matches.push_back(TermMatch(0, 0, 1)); term_matches.push_back(TermMatch(0, 32, 1)); term_matches.push_back(TermMatch(0, 37, 1)); term_matches.push_back(TermMatch(0, 43, 1)); TermMatches filtered_term_matches = ScoredHistoryMatch::FilterTermMatchesByWordStarts( term_matches, terms_to_word_starts_offsets, word_starts, 15, std::string::npos); ASSERT_EQ(3u, filtered_term_matches.size()); EXPECT_EQ(0u, filtered_term_matches[0].offset); EXPECT_EQ(37u, filtered_term_matches[1].offset); EXPECT_EQ(43u, filtered_term_matches[2].offset); // The "http" match should remain after removing the mid-word matches in the // scheme. The "4" is the position of the ":" character ending the scheme. filtered_term_matches = ScoredHistoryMatch::FilterTermMatchesByWordStarts( filtered_term_matches, terms_to_word_starts_offsets, word_starts, 0, 5); ASSERT_EQ(3u, filtered_term_matches.size()); EXPECT_EQ(0u, filtered_term_matches[0].offset); EXPECT_EQ(37u, filtered_term_matches[1].offset); EXPECT_EQ(43u, filtered_term_matches[2].offset); // Check that "t" matches "http" twice and "test" twice but not "default" or // "html" when asked to filter non-word-start matches after the hostname. term_matches.clear(); term_matches.push_back(TermMatch(0, 1, 1)); term_matches.push_back(TermMatch(0, 2, 1)); term_matches.push_back(TermMatch(0, 7, 1)); term_matches.push_back(TermMatch(0, 10, 1)); term_matches.push_back(TermMatch(0, 22, 1)); term_matches.push_back(TermMatch(0, 45, 1)); filtered_term_matches = ScoredHistoryMatch::FilterTermMatchesByWordStarts( term_matches, terms_to_word_starts_offsets, word_starts, 15, std::string::npos); ASSERT_EQ(4u, filtered_term_matches.size()); EXPECT_EQ(1u, filtered_term_matches[0].offset); EXPECT_EQ(2u, filtered_term_matches[1].offset); EXPECT_EQ(7u, filtered_term_matches[2].offset); EXPECT_EQ(10u, filtered_term_matches[3].offset); // The "http" matches should disappear after removing mid-word matches in the // scheme. filtered_term_matches = ScoredHistoryMatch::FilterTermMatchesByWordStarts( filtered_term_matches, terms_to_word_starts_offsets, word_starts, 0, 4); ASSERT_EQ(2u, filtered_term_matches.size()); EXPECT_EQ(7u, filtered_term_matches[0].offset); EXPECT_EQ(10u, filtered_term_matches[1].offset); // Check that "e" matches "test" but not "default" or "hello" when asked to // filter non-word-start matches after the hostname. term_matches.clear(); term_matches.push_back(TermMatch(0, 8, 1)); term_matches.push_back(TermMatch(0, 17, 1)); term_matches.push_back(TermMatch(0, 38, 1)); filtered_term_matches = ScoredHistoryMatch::FilterTermMatchesByWordStarts( term_matches, terms_to_word_starts_offsets, word_starts, 15, std::string::npos); ASSERT_EQ(1u, filtered_term_matches.size()); EXPECT_EQ(8u, filtered_term_matches[0].offset); // Check that "d" matches "default" when asked to filter non-word-start // matches after the hostname. 
term_matches.clear(); term_matches.push_back(TermMatch(0, 16, 1)); filtered_term_matches = ScoredHistoryMatch::FilterTermMatchesByWordStarts( term_matches, terms_to_word_starts_offsets, word_starts, 15, std::string::npos); ASSERT_EQ(1u, filtered_term_matches.size()); EXPECT_EQ(16u, filtered_term_matches[0].offset); // Check that "a" matches "aspxhome" but not "default" when asked to filter // non-word-start matches after the hostname. term_matches.clear(); term_matches.push_back(TermMatch(0, 19, 1)); term_matches.push_back(TermMatch(0, 28, 1)); filtered_term_matches = ScoredHistoryMatch::FilterTermMatchesByWordStarts( term_matches, terms_to_word_starts_offsets, word_starts, 15, std::string::npos); ASSERT_EQ(1u, filtered_term_matches.size()); EXPECT_EQ(28u, filtered_term_matches[0].offset); // Check that ".a" matches "aspxhome", i.e., that we recognize that is // is a valid match at a word break. To recognize this, // |terms_to_word_starts_offsets| must record that the "word" in this term // starts at the second character. terms_to_word_starts_offsets[0] = 1; term_matches.clear(); term_matches.push_back(TermMatch(0, 27, 1)); filtered_term_matches = ScoredHistoryMatch::FilterTermMatchesByWordStarts( term_matches, terms_to_word_starts_offsets, word_starts, 15, std::string::npos); ASSERT_EQ(1u, filtered_term_matches.size()); EXPECT_EQ(27u, filtered_term_matches[0].offset); } TEST_F(ScoredHistoryMatchTest, GetFrequency) { // Build a fake ScoredHistoryMatch, which we'll then reuse multiple times. history::URLRow row(GURL("http://foo")); RowWordStarts row_word_starts; PopulateWordStarts(row, &row_word_starts); base::Time now(base::Time::Max()); VisitInfoVector visits; ScoredHistoryMatch match(row, visits, ASCIIToUTF16("foo"), Make1Term("foo"), WordStarts{0}, row_word_starts, false, 1, now); // Record the score for one untyped visit. visits = {{now, ui::PAGE_TRANSITION_LINK}}; const float one_untyped_score = match.GetFrequency(now, false, visits); // The score for one typed visit should be larger. visits = VisitInfoVector{{now, ui::PAGE_TRANSITION_TYPED}}; const float one_typed_score = match.GetFrequency(now, false, visits); EXPECT_GT(one_typed_score, one_untyped_score); // It shouldn't matter if the typed visit has a transition qualifier. visits = { {now, ui::PageTransitionFromInt(ui::PAGE_TRANSITION_TYPED | ui::PAGE_TRANSITION_SERVER_REDIRECT)}}; EXPECT_EQ(one_typed_score, match.GetFrequency(now, false, visits)); // A score for one untyped visit to a bookmarked page should be larger than // the one untyped visit to a non-bookmarked page. visits = {{now, ui::PAGE_TRANSITION_LINK}}; EXPECT_GE(match.GetFrequency(now, true, visits), one_untyped_score); // Now consider pages visited twice, with one visit being typed and one // untyped. // A two-visit score should have a higher score than the single typed visit // score. visits = {{now, ui::PAGE_TRANSITION_TYPED}, {now - base::TimeDelta::FromDays(1), ui::PAGE_TRANSITION_LINK}}; const float two_visits_score = match.GetFrequency(now, false, visits); EXPECT_GT(two_visits_score, one_typed_score); // Add an third untyped visit. visits.push_back( {now - base::TimeDelta::FromDays(2), ui::PAGE_TRANSITION_LINK}); // The score should be higher than the two-visit score. const float three_visits_score = match.GetFrequency(now, false, visits); EXPECT_GT(three_visits_score, two_visits_score); // If we're only supposed to consider the most recent two visits, then the // score should be the same as in the two-visit case. 
{ base::AutoReset<size_t> tmp1(&ScoredHistoryMatch::max_visits_to_score_, 2); EXPECT_EQ(two_visits_score, match.GetFrequency(now, false, visits)); // Check again with the third visit being typed. visits[2].second = ui::PAGE_TRANSITION_TYPED; EXPECT_EQ(two_visits_score, match.GetFrequency(now, false, visits)); } } TEST_F(ScoredHistoryMatchTest, GetDocumentSpecificityScore) { // Build a fake ScoredHistoryMatch, which we'll then reuse multiple times. history::URLRow row(GURL("http://foo")); RowWordStarts row_word_starts; PopulateWordStarts(row, &row_word_starts); base::Time now(base::Time::Max()); VisitInfoVector visits; ScoredHistoryMatch match(row, visits, ASCIIToUTF16("foo"), Make1Term("foo"), WordStarts{0}, row_word_starts, false, 1, now); EXPECT_EQ(3.0, match.GetDocumentSpecificityScore(1)); EXPECT_EQ(1.0, match.GetDocumentSpecificityScore(5)); EXPECT_EQ(1.0, match.GetDocumentSpecificityScore(50)); OmniboxFieldTrial::NumMatchesScores matches_to_specificity; base::AutoReset<OmniboxFieldTrial::NumMatchesScores*> tmp( &ScoredHistoryMatch::matches_to_specificity_override_, &matches_to_specificity); matches_to_specificity = {{1, 3.0}}; EXPECT_EQ(3.0, match.GetDocumentSpecificityScore(1)); EXPECT_EQ(1.0, match.GetDocumentSpecificityScore(5)); matches_to_specificity = {{1, 3.0}, {3, 1.5}}; EXPECT_EQ(3.0, match.GetDocumentSpecificityScore(1)); EXPECT_EQ(1.5, match.GetDocumentSpecificityScore(2)); EXPECT_EQ(1.5, match.GetDocumentSpecificityScore(3)); EXPECT_EQ(1.0, match.GetDocumentSpecificityScore(4)); } // This function only tests scoring of single terms that match exactly // once somewhere in the URL or title. TEST_F(ScoredHistoryMatchTest, GetTopicalityScore) { GURL url("http://abc.def.com/path1/path2?arg1=val1&arg2=val2#hash_component"); base::string16 title = ASCIIToUTF16("here is a title"); auto Score = [&](const char* term) { return GetTopicalityScoreOfTermAgainstURLAndTitle(ASCIIToUTF16(term), url, title); }; const float hostname_score = Score("abc"); const float hostname_mid_word_score = Score("bc"); const float hostname_score_preceeding_punctuation = Score("://abc"); const float domain_name_score = Score("def"); const float domain_name_mid_word_score = Score("ef"); const float domain_name_score_preceeding_dot = Score(".def"); const float tld_score = Score("com"); const float tld_mid_word_score = Score("om"); const float tld_score_preceeding_dot = Score(".com"); const float path_score = Score("path1"); const float path_mid_word_score = Score("ath1"); const float path_score_preceeding_slash = Score("/path1"); const float arg_score = Score("arg1"); const float arg_mid_word_score = Score("rg1"); const float arg_score_preceeding_question_mark = Score("?arg1"); const float protocol_score = Score("htt"); const float protocol_mid_word_score = Score("tt"); const float title_score = Score("her"); const float title_mid_word_score = Score("er"); // Verify hostname and domain name > path > arg. EXPECT_GT(hostname_score, path_score); EXPECT_GT(domain_name_score, path_score); EXPECT_GT(path_score, arg_score); // Verify leading punctuation doesn't confuse scoring. EXPECT_EQ(hostname_score, hostname_score_preceeding_punctuation); EXPECT_EQ(domain_name_score, domain_name_score_preceeding_dot); EXPECT_EQ(tld_score, tld_score_preceeding_dot); EXPECT_EQ(path_score, path_score_preceeding_slash); EXPECT_EQ(arg_score, arg_score_preceeding_question_mark); // Verify that domain name > path and domain name > arg for non-word // boundaries. 
EXPECT_GT(hostname_mid_word_score, path_mid_word_score); EXPECT_GT(domain_name_mid_word_score, path_mid_word_score); EXPECT_GT(domain_name_mid_word_score, arg_mid_word_score); EXPECT_GT(hostname_mid_word_score, arg_mid_word_score); // Also verify that the matches at non-word-boundaries all score // worse than the matches at word boundaries. These three sets suffice. EXPECT_GT(arg_score, hostname_mid_word_score); EXPECT_GT(arg_score, domain_name_mid_word_score); EXPECT_GT(title_score, title_mid_word_score); // Check that title matches fit somewhere reasonable compared to the // various types of URL matches. EXPECT_GT(title_score, arg_score); EXPECT_GT(arg_score, title_mid_word_score); // Finally, verify that protocol matches and top level domain name // matches (.com, .net, etc.) score worse than some of the mid-word // matches that actually count. EXPECT_GT(hostname_mid_word_score, protocol_score); EXPECT_GT(hostname_mid_word_score, protocol_mid_word_score); EXPECT_GT(hostname_mid_word_score, tld_score); EXPECT_GT(hostname_mid_word_score, tld_mid_word_score); } // Test the function GetFinalRelevancyScore(). TEST_F(ScoredHistoryMatchTest, GetFinalRelevancyScore) { // relevance_buckets = "0.0:100,1.0:200,4.0:500,8.0:900,10.0:1000"; ScoredHistoryMatch::ScoreMaxRelevances relevance_buckets = { {0.0, 100}, {1.0, 200}, {4.0, 500}, {8.0, 900}, {10.0, 1000}}; base::AutoReset<ScoredHistoryMatch::ScoreMaxRelevances*> tmp( &ScoredHistoryMatch::relevance_buckets_override_, &relevance_buckets); // Check when topicality score is zero. float topicality_score = 0.0; float frequency_score = 10.0; float specificity_score = 1.0; // intermediate_score = 0.0 * 10.0 * 1.0 = 0.0. EXPECT_EQ(0, ScoredHistoryMatch::GetFinalRelevancyScore( topicality_score, frequency_score, specificity_score)); // Check when intermediate score falls at the border range. topicality_score = 0.4f; frequency_score = 10.0f; // intermediate_score = 0.4 * 10.0 * 1.0 = 4.0. EXPECT_EQ(500, ScoredHistoryMatch::GetFinalRelevancyScore( topicality_score, frequency_score, specificity_score)); // Checking the score that falls into one of the buckets. topicality_score = 0.5f; frequency_score = 10.0f; // intermediate_score = 0.5 * 10.0 * 1.0 = 5.0. EXPECT_EQ(600, // 500 + (((900 - 500)/(8 -4)) * 1) = 600. ScoredHistoryMatch::GetFinalRelevancyScore( topicality_score, frequency_score, specificity_score)); // Never give the score greater than maximum specified. topicality_score = 0.5f; frequency_score = 22.0f; // intermediate_score = 0.5 * 22.0 * 1.0 = 11.0 EXPECT_EQ(1000, ScoredHistoryMatch::GetFinalRelevancyScore( topicality_score, frequency_score, specificity_score)); } // Test the function GetHQPBucketsFromString(). TEST_F(ScoredHistoryMatchTest, GetHQPBucketsFromString) { std::string buckets_str = "0.0:400,1.5:600,12.0:1300,20.0:1399"; std::vector<ScoredHistoryMatch::ScoreMaxRelevance> hqp_buckets = ScoredHistoryMatch::GetHQPBucketsFromString(buckets_str); EXPECT_THAT(hqp_buckets, ElementsAre(Pair(0.0, 400), Pair(1.5, 600), Pair(12.0, 1300), Pair(20.0, 1399))); // Test using an invalid string. buckets_str = "0.0,400,1.5,600"; hqp_buckets = ScoredHistoryMatch::GetHQPBucketsFromString(buckets_str); EXPECT_TRUE(hqp_buckets.empty()); }
null
null
null
null
8,749
51,530
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
51,530
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ui/keyboard/keyboard_ui.h"

#include "base/command_line.h"
#include "ui/aura/window.h"
#include "ui/base/ime/input_method.h"
#include "ui/base/ime/text_input_client.h"
#include "ui/base/ui_base_switches.h"
#include "ui/keyboard/keyboard_controller.h"

namespace keyboard {

KeyboardUI::KeyboardUI() : keyboard_controller_(nullptr) {}

KeyboardUI::~KeyboardUI() {}

void KeyboardUI::ShowKeyboardContainer(aura::Window* container) {
  if (HasContentsWindow()) {
    {
      TRACE_EVENT0("vk", "ShowKeyboardContainerWindow");
      GetContentsWindow()->Show();
    }
    {
      TRACE_EVENT0("vk", "ShowKeyboardContainer");
      container->Show();
    }
  }
}

void KeyboardUI::HideKeyboardContainer(aura::Window* container) {
  if (HasContentsWindow()) {
    container->Hide();
    GetContentsWindow()->Hide();
  }
}

void KeyboardUI::EnsureCaretInWorkArea() {
  if (!GetInputMethod())
    return;
  TRACE_EVENT0("vk", "EnsureCaretInWorkArea");
  const aura::Window* contents_window = GetContentsWindow();
  const gfx::Rect keyboard_bounds_in_screen =
      contents_window->IsVisible() ? contents_window->GetBoundsInScreen()
                                   : gfx::Rect();
  if (keyboard_controller_->IsOverscrollAllowed()) {
    GetInputMethod()->SetOnScreenKeyboardBounds(keyboard_bounds_in_screen);
  } else if (GetInputMethod()->GetTextInputClient()) {
    GetInputMethod()->GetTextInputClient()->EnsureCaretNotInRect(
        keyboard_bounds_in_screen);
  }
}

void KeyboardUI::SetController(KeyboardController* controller) {
  keyboard_controller_ = controller;
}

}  // namespace keyboard
null
null
null
null
48,393
19,453
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
19,453
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/variations/service/safe_seed_manager.h" #include <memory> #include <string> #include "base/base_switches.h" #include "base/command_line.h" #include "base/macros.h" #include "base/test/histogram_tester.h" #include "base/time/time.h" #include "components/prefs/testing_pref_service.h" #include "components/variations/client_filterable_state.h" #include "components/variations/pref_names.h" #include "components/variations/variations_seed_store.h" #include "testing/gtest/include/gtest/gtest.h" namespace variations { namespace { const char kTestSeed[] = "compressed, base-64 encoded serialized seed data"; const char kTestSignature[] = "a completely unforged signature, I promise!"; const char kTestLocale[] = "en-US"; const char kTestPermanentConsistencyCountry[] = "US"; const char kTestSessionConsistencyCountry[] = "CA"; base::Time GetTestFetchTime() { return base::Time::FromDeltaSinceWindowsEpoch(base::TimeDelta::FromDays(123)); } // A simple fake data store. class FakeSeedStore : public VariationsSeedStore { public: FakeSeedStore() : VariationsSeedStore(nullptr) {} ~FakeSeedStore() override = default; bool StoreSafeSeed(const std::string& seed_data, const std::string& base64_seed_signature, const ClientFilterableState& client_state, base::Time seed_fetch_time) override { seed_data_ = seed_data; signature_ = base64_seed_signature; date_ = client_state.reference_date; locale_ = client_state.locale; permanent_consistency_country_ = client_state.permanent_consistency_country; session_consistency_country_ = client_state.session_consistency_country; fetch_time_ = seed_fetch_time; return true; } const std::string& seed_data() const { return seed_data_; } const std::string& signature() const { return signature_; } const base::Time& date() const { return date_; } const std::string& locale() const { return locale_; } const std::string& permanent_consistency_country() const { return permanent_consistency_country_; } const std::string& session_consistency_country() const { return session_consistency_country_; } const base::Time& fetch_time() const { return fetch_time_; } private: // The stored data. std::string seed_data_; std::string signature_; base::Time date_; std::string locale_; std::string permanent_consistency_country_; std::string session_consistency_country_; base::Time fetch_time_; DISALLOW_COPY_AND_ASSIGN(FakeSeedStore); }; // Passes the default test values as the active state into the // |safe_seed_manager|. void SetDefaultActiveState(SafeSeedManager* safe_seed_manager) { std::unique_ptr<ClientFilterableState> client_state = std::make_unique<ClientFilterableState>(); client_state->locale = kTestLocale; client_state->permanent_consistency_country = kTestPermanentConsistencyCountry; client_state->session_consistency_country = kTestSessionConsistencyCountry; client_state->reference_date = base::Time::UnixEpoch(); safe_seed_manager->SetActiveSeedState( kTestSeed, kTestSignature, std::move(client_state), GetTestFetchTime()); } // Verifies that the default test values were written to the seed store. 
void ExpectDefaultActiveState(const FakeSeedStore& seed_store) { EXPECT_EQ(kTestSeed, seed_store.seed_data()); EXPECT_EQ(kTestSignature, seed_store.signature()); EXPECT_EQ(kTestLocale, seed_store.locale()); EXPECT_EQ(kTestPermanentConsistencyCountry, seed_store.permanent_consistency_country()); EXPECT_EQ(kTestSessionConsistencyCountry, seed_store.session_consistency_country()); EXPECT_EQ(base::Time::UnixEpoch(), seed_store.date()); EXPECT_EQ(GetTestFetchTime(), seed_store.fetch_time()); } } // namespace class SafeSeedManagerTest : public testing::Test { public: SafeSeedManagerTest() { SafeSeedManager::RegisterPrefs(prefs_.registry()); } ~SafeSeedManagerTest() override = default; protected: TestingPrefServiceSimple prefs_; }; TEST_F(SafeSeedManagerTest, RecordSuccessfulFetch_FirstCallSavesSafeSeed) { SafeSeedManager safe_seed_manager(true, &prefs_); SetDefaultActiveState(&safe_seed_manager); FakeSeedStore seed_store; safe_seed_manager.RecordSuccessfulFetch(&seed_store); ExpectDefaultActiveState(seed_store); } TEST_F(SafeSeedManagerTest, RecordSuccessfulFetch_RepeatedCallsRetainSafeSeed) { SafeSeedManager safe_seed_manager(true, &prefs_); SetDefaultActiveState(&safe_seed_manager); FakeSeedStore seed_store; safe_seed_manager.RecordSuccessfulFetch(&seed_store); safe_seed_manager.RecordSuccessfulFetch(&seed_store); safe_seed_manager.RecordSuccessfulFetch(&seed_store); ExpectDefaultActiveState(seed_store); } TEST_F(SafeSeedManagerTest, RecordSuccessfulFetch_NoActiveState_DoesntSaveSafeSeed) { SafeSeedManager safe_seed_manager(true, &prefs_); // Omit setting any active state. FakeSeedStore seed_store; safe_seed_manager.RecordSuccessfulFetch(&seed_store); EXPECT_EQ(std::string(), seed_store.seed_data()); EXPECT_EQ(std::string(), seed_store.signature()); EXPECT_EQ(std::string(), seed_store.locale()); EXPECT_EQ(std::string(), seed_store.permanent_consistency_country()); EXPECT_EQ(std::string(), seed_store.session_consistency_country()); EXPECT_EQ(base::Time(), seed_store.date()); EXPECT_EQ(base::Time(), seed_store.fetch_time()); } TEST_F(SafeSeedManagerTest, StreakMetrics_NoPrefs) { base::HistogramTester histogram_tester; SafeSeedManager safe_seed_manager(true, &prefs_); histogram_tester.ExpectUniqueSample("Variations.SafeMode.Streak.Crashes", 0, 1); histogram_tester.ExpectUniqueSample( "Variations.SafeMode.Streak.FetchFailures", 0, 1); } TEST_F(SafeSeedManagerTest, StreakMetrics_NoCrashes_NoFetchFailures) { prefs_.SetInteger(prefs::kVariationsCrashStreak, 0); prefs_.SetInteger(prefs::kVariationsFailedToFetchSeedStreak, 0); base::HistogramTester histogram_tester; SafeSeedManager safe_seed_manager(true, &prefs_); histogram_tester.ExpectUniqueSample("Variations.SafeMode.Streak.Crashes", 0, 1); histogram_tester.ExpectUniqueSample( "Variations.SafeMode.Streak.FetchFailures", 0, 1); } TEST_F(SafeSeedManagerTest, StreakMetrics_SomeCrashes_SomeFetchFailures) { prefs_.SetInteger(prefs::kVariationsCrashStreak, 1); prefs_.SetInteger(prefs::kVariationsFailedToFetchSeedStreak, 2); base::HistogramTester histogram_tester; SafeSeedManager safe_seed_manager(true, &prefs_); histogram_tester.ExpectUniqueSample("Variations.SafeMode.Streak.Crashes", 1, 1); histogram_tester.ExpectUniqueSample( "Variations.SafeMode.Streak.FetchFailures", 2, 1); } TEST_F(SafeSeedManagerTest, StreakMetrics_CrashIncrementsCrashStreak) { prefs_.SetInteger(prefs::kVariationsCrashStreak, 1); base::HistogramTester histogram_tester; SafeSeedManager safe_seed_manager(false, &prefs_); EXPECT_EQ(2, prefs_.GetInteger(prefs::kVariationsCrashStreak)); 
histogram_tester.ExpectUniqueSample("Variations.SafeMode.Streak.Crashes", 2, 1); } TEST_F(SafeSeedManagerTest, StreakMetrics_CrashIncrementsCrashStreak_NoPrefs) { base::HistogramTester histogram_tester; SafeSeedManager safe_seed_manager(false, &prefs_); EXPECT_EQ(1, prefs_.GetInteger(prefs::kVariationsCrashStreak)); histogram_tester.ExpectUniqueSample("Variations.SafeMode.Streak.Crashes", 1, 1); } TEST_F(SafeSeedManagerTest, ShouldRunInSafeMode_OverriddenByCommandlineFlag) { // So many failures. prefs_.SetInteger(prefs::kVariationsCrashStreak, 100); prefs_.SetInteger(prefs::kVariationsFailedToFetchSeedStreak, 100); base::CommandLine::ForCurrentProcess()->AppendSwitchASCII( ::switches::kForceFieldTrials, "SomeFieldTrial"); SafeSeedManager safe_seed_manager(true, &prefs_); EXPECT_FALSE(safe_seed_manager.ShouldRunInSafeMode()); } TEST_F(SafeSeedManagerTest, ShouldRunInSafeMode_NoCrashes_NoFetchFailures) { prefs_.SetInteger(prefs::kVariationsCrashStreak, 0); prefs_.SetInteger(prefs::kVariationsFailedToFetchSeedStreak, 0); SafeSeedManager safe_seed_manager(true, &prefs_); EXPECT_FALSE(safe_seed_manager.ShouldRunInSafeMode()); } TEST_F(SafeSeedManagerTest, ShouldRunInSafeMode_NoPrefs) { // Don't explicitly set either of the prefs. The implicit/default values // should be zero. SafeSeedManager safe_seed_manager(true, &prefs_); EXPECT_FALSE(safe_seed_manager.ShouldRunInSafeMode()); } TEST_F(SafeSeedManagerTest, ShouldRunInSafeMode_FewCrashes_FewFetchFailures) { prefs_.SetInteger(prefs::kVariationsCrashStreak, 2); prefs_.SetInteger(prefs::kVariationsFailedToFetchSeedStreak, 2); SafeSeedManager safe_seed_manager(true, &prefs_); EXPECT_FALSE(safe_seed_manager.ShouldRunInSafeMode()); } TEST_F(SafeSeedManagerTest, ShouldRunInSafeMode_ManyCrashes_NoFetchFailures) { prefs_.SetInteger(prefs::kVariationsCrashStreak, 3); prefs_.SetInteger(prefs::kVariationsFailedToFetchSeedStreak, 0); SafeSeedManager safe_seed_manager(true, &prefs_); EXPECT_TRUE(safe_seed_manager.ShouldRunInSafeMode()); } TEST_F(SafeSeedManagerTest, ShouldRunInSafeMode_NoCrashes_ManyFetchFailures) { prefs_.SetInteger(prefs::kVariationsCrashStreak, 0); prefs_.SetInteger(prefs::kVariationsFailedToFetchSeedStreak, 50); SafeSeedManager safe_seed_manager(true, &prefs_); EXPECT_TRUE(safe_seed_manager.ShouldRunInSafeMode()); } TEST_F(SafeSeedManagerTest, ShouldRunInSafeMode_ManyCrashes_ManyFetchFailures) { prefs_.SetInteger(prefs::kVariationsCrashStreak, 3); prefs_.SetInteger(prefs::kVariationsFailedToFetchSeedStreak, 50); SafeSeedManager safe_seed_manager(true, &prefs_); EXPECT_TRUE(safe_seed_manager.ShouldRunInSafeMode()); } } // namespace variations
null
null
null
null
16,316
23,383
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
188,378
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * drivers/pci/pcie/aer/aerdrv_core.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * This file implements the core part of PCI-Express AER. When an pci-express * error is delivered, an error message will be collected and printed to * console, then, an error recovery procedure will be executed by following * the pci error recovery rules. * * Copyright (C) 2006 Intel Corp. * Tom Long Nguyen (tom.l.nguyen@intel.com) * Zhang Yanmin (yanmin.zhang@intel.com) * */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/pm.h> #include <linux/suspend.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/kfifo.h> #include "aerdrv.h" #define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \ PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE) int pci_enable_pcie_error_reporting(struct pci_dev *dev) { if (pcie_aer_get_firmware_first(dev)) return -EIO; if (!dev->aer_cap) return -EIO; return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS); } EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); int pci_disable_pcie_error_reporting(struct pci_dev *dev) { if (pcie_aer_get_firmware_first(dev)) return -EIO; return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS); } EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) { int pos; u32 status; pos = dev->aer_cap; if (!pos) return -EIO; pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); if (status) pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); return 0; } EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); int pci_cleanup_aer_error_status_regs(struct pci_dev *dev) { int pos; u32 status; int port_type; if (!pci_is_pcie(dev)) return -ENODEV; pos = dev->aer_cap; if (!pos) return -EIO; port_type = pci_pcie_type(dev); if (port_type == PCI_EXP_TYPE_ROOT_PORT) { pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status); pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status); } pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status); pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status); pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); return 0; } int pci_aer_init(struct pci_dev *dev) { dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); return pci_cleanup_aer_error_status_regs(dev); } /** * add_error_device - list device to be handled * @e_info: pointer to error info * @dev: pointer to pci_dev to be added */ static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev) { if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) { e_info->dev[e_info->error_dev_num] = dev; e_info->error_dev_num++; return 0; } return -ENOSPC; } /** * is_error_source - check whether the device is source of reported error * @dev: pointer to pci_dev to be checked * @e_info: pointer to reported error info */ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info) { int pos; u32 status, mask; u16 reg16; /* * When bus id is equal to 0, it might be a bad id * reported by root port. */ if ((PCI_BUS_NUM(e_info->id) != 0) && !(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_AERSID)) { /* Device ID match? 
*/ if (e_info->id == ((dev->bus->number << 8) | dev->devfn)) return true; /* Continue id comparing if there is no multiple error */ if (!e_info->multi_error_valid) return false; } /* * When either * 1) bus id is equal to 0. Some ports might lose the bus * id of error source id; * 2) bus flag PCI_BUS_FLAGS_NO_AERSID is set * 3) There are multiple errors and prior ID comparing fails; * We check AER status registers to find possible reporter. */ if (atomic_read(&dev->enable_cnt) == 0) return false; /* Check if AER is enabled */ pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16); if (!(reg16 & PCI_EXP_AER_FLAGS)) return false; pos = dev->aer_cap; if (!pos) return false; /* Check if error is recorded */ if (e_info->severity == AER_CORRECTABLE) { pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status); pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask); } else { pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask); } if (status & ~mask) return true; return false; } static int find_device_iter(struct pci_dev *dev, void *data) { struct aer_err_info *e_info = (struct aer_err_info *)data; if (is_error_source(dev, e_info)) { /* List this device */ if (add_error_device(e_info, dev)) { /* We cannot handle more... Stop iteration */ /* TODO: Should print error message here? */ return 1; } /* If there is only a single error, stop iteration */ if (!e_info->multi_error_valid) return 1; } return 0; } /** * find_source_device - search through device hierarchy for source device * @parent: pointer to Root Port pci_dev data structure * @e_info: including detailed error information such like id * * Return true if found. * * Invoked by DPC when error is detected at the Root Port. * Caller of this function must set id, severity, and multi_error_valid of * struct aer_err_info pointed by @e_info properly. This function must fill * e_info->error_dev_num and e_info->dev[], based on the given information. */ static bool find_source_device(struct pci_dev *parent, struct aer_err_info *e_info) { struct pci_dev *dev = parent; int result; /* Must reset in this function */ e_info->error_dev_num = 0; /* Is Root Port an agent that sends error message? */ result = find_device_iter(dev, e_info); if (result) return true; pci_walk_bus(parent->subordinate, find_device_iter, e_info); if (!e_info->error_dev_num) { dev_printk(KERN_DEBUG, &parent->dev, "can't find device of ID%04x\n", e_info->id); return false; } return true; } static int report_error_detected(struct pci_dev *dev, void *data) { pci_ers_result_t vote; const struct pci_error_handlers *err_handler; struct aer_broadcast_data *result_data; result_data = (struct aer_broadcast_data *) data; device_lock(&dev->dev); dev->error_state = result_data->state; if (!dev->driver || !dev->driver->err_handler || !dev->driver->err_handler->error_detected) { if (result_data->state == pci_channel_io_frozen && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) { /* * In case of fatal recovery, if one of down- * stream device has no driver. We might be * unable to recover because a later insmod * of a driver for this device is unaware of * its hw state. */ dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n", dev->driver ? "no AER-aware driver" : "no driver"); } /* * If there's any device in the subtree that does not * have an error_detected callback, returning * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of * the subsequent mmio_enabled/slot_reset/resume * callbacks of "any" device in the subtree. 
All the * devices in the subtree are left in the error state * without recovery. */ if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) vote = PCI_ERS_RESULT_NO_AER_DRIVER; else vote = PCI_ERS_RESULT_NONE; } else { err_handler = dev->driver->err_handler; vote = err_handler->error_detected(dev, result_data->state); } result_data->result = merge_result(result_data->result, vote); device_unlock(&dev->dev); return 0; } static int report_mmio_enabled(struct pci_dev *dev, void *data) { pci_ers_result_t vote; const struct pci_error_handlers *err_handler; struct aer_broadcast_data *result_data; result_data = (struct aer_broadcast_data *) data; device_lock(&dev->dev); if (!dev->driver || !dev->driver->err_handler || !dev->driver->err_handler->mmio_enabled) goto out; err_handler = dev->driver->err_handler; vote = err_handler->mmio_enabled(dev); result_data->result = merge_result(result_data->result, vote); out: device_unlock(&dev->dev); return 0; } static int report_slot_reset(struct pci_dev *dev, void *data) { pci_ers_result_t vote; const struct pci_error_handlers *err_handler; struct aer_broadcast_data *result_data; result_data = (struct aer_broadcast_data *) data; device_lock(&dev->dev); if (!dev->driver || !dev->driver->err_handler || !dev->driver->err_handler->slot_reset) goto out; err_handler = dev->driver->err_handler; vote = err_handler->slot_reset(dev); result_data->result = merge_result(result_data->result, vote); out: device_unlock(&dev->dev); return 0; } static int report_resume(struct pci_dev *dev, void *data) { const struct pci_error_handlers *err_handler; device_lock(&dev->dev); dev->error_state = pci_channel_io_normal; if (!dev->driver || !dev->driver->err_handler || !dev->driver->err_handler->resume) goto out; err_handler = dev->driver->err_handler; err_handler->resume(dev); out: device_unlock(&dev->dev); return 0; } /** * broadcast_error_message - handle message broadcast to downstream drivers * @dev: pointer to from where in a hierarchy message is broadcasted down * @state: error state * @error_mesg: message to print * @cb: callback to be broadcasted * * Invoked during error recovery process. Once being invoked, the content * of error severity will be broadcasted to all downstream drivers in a * hierarchy in question. */ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, enum pci_channel_state state, char *error_mesg, int (*cb)(struct pci_dev *, void *)) { struct aer_broadcast_data result_data; dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg); result_data.state = state; if (cb == report_error_detected) result_data.result = PCI_ERS_RESULT_CAN_RECOVER; else result_data.result = PCI_ERS_RESULT_RECOVERED; if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { /* * If the error is reported by a bridge, we think this error * is related to the downstream link of the bridge, so we * do error recovery on all subordinates of the bridge instead * of the bridge and clear the error status of the bridge. */ if (cb == report_error_detected) dev->error_state = state; pci_walk_bus(dev->subordinate, cb, &result_data); if (cb == report_resume) { pci_cleanup_aer_uncorrect_error_status(dev); dev->error_state = pci_channel_io_normal; } } else { /* * If the error is reported by an end point, we think this * error is related to the upstream link of the end point. 
*/ pci_walk_bus(dev->bus, cb, &result_data); } return result_data.result; } /** * default_reset_link - default reset function * @dev: pointer to pci_dev data structure * * Invoked when performing link reset on a Downstream Port or a * Root Port with no aer driver. */ static pci_ers_result_t default_reset_link(struct pci_dev *dev) { pci_reset_bridge_secondary_bus(dev); dev_printk(KERN_DEBUG, &dev->dev, "downstream link has been reset\n"); return PCI_ERS_RESULT_RECOVERED; } static int find_aer_service_iter(struct device *device, void *data) { struct pcie_port_service_driver *service_driver, **drv; drv = (struct pcie_port_service_driver **) data; if (device->bus == &pcie_port_bus_type && device->driver) { service_driver = to_service_driver(device->driver); if (service_driver->service == PCIE_PORT_SERVICE_AER) { *drv = service_driver; return 1; } } return 0; } static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev) { struct pcie_port_service_driver *drv = NULL; device_for_each_child(&dev->dev, &drv, find_aer_service_iter); return drv; } static pci_ers_result_t reset_link(struct pci_dev *dev) { struct pci_dev *udev; pci_ers_result_t status; struct pcie_port_service_driver *driver; if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { /* Reset this port for all subordinates */ udev = dev; } else { /* Reset the upstream component (likely downstream port) */ udev = dev->bus->self; } /* Use the aer driver of the component firstly */ driver = find_aer_service(udev); if (driver && driver->reset_link) { status = driver->reset_link(udev); } else if (udev->has_secondary_link) { status = default_reset_link(udev); } else { dev_printk(KERN_DEBUG, &dev->dev, "no link-reset support at upstream device %s\n", pci_name(udev)); return PCI_ERS_RESULT_DISCONNECT; } if (status != PCI_ERS_RESULT_RECOVERED) { dev_printk(KERN_DEBUG, &dev->dev, "link reset at upstream device %s failed\n", pci_name(udev)); return PCI_ERS_RESULT_DISCONNECT; } return status; } /** * do_recovery - handle nonfatal/fatal error recovery process * @dev: pointer to a pci_dev data structure of agent detecting an error * @severity: error severity type * * Invoked when an error is nonfatal/fatal. Once being invoked, broadcast * error detected message to all downstream drivers within a hierarchy in * question and return the returned code. */ static void do_recovery(struct pci_dev *dev, int severity) { pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED; enum pci_channel_state state; if (severity == AER_FATAL) state = pci_channel_io_frozen; else state = pci_channel_io_normal; status = broadcast_error_message(dev, state, "error_detected", report_error_detected); if (severity == AER_FATAL) { result = reset_link(dev); if (result != PCI_ERS_RESULT_RECOVERED) goto failed; } if (status == PCI_ERS_RESULT_CAN_RECOVER) status = broadcast_error_message(dev, state, "mmio_enabled", report_mmio_enabled); if (status == PCI_ERS_RESULT_NEED_RESET) { /* * TODO: Should call platform-specific * functions to reset slot before calling * drivers' slot_reset callbacks? */ status = broadcast_error_message(dev, state, "slot_reset", report_slot_reset); } if (status != PCI_ERS_RESULT_RECOVERED) goto failed; broadcast_error_message(dev, state, "resume", report_resume); dev_info(&dev->dev, "AER: Device recovery successful\n"); return; failed: /* TODO: Should kernel panic here? 
*/ dev_info(&dev->dev, "AER: Device recovery failed\n"); } /** * handle_error_source - handle logging error into an event log * @aerdev: pointer to pcie_device data structure of the root port * @dev: pointer to pci_dev data structure of error source device * @info: comprehensive error information * * Invoked when an error being detected by Root Port. */ static void handle_error_source(struct pcie_device *aerdev, struct pci_dev *dev, struct aer_err_info *info) { int pos; if (info->severity == AER_CORRECTABLE) { /* * Correctable error does not need software intervention. * No need to go through error recovery process. */ pos = dev->aer_cap; if (pos) pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, info->status); } else do_recovery(dev, info->severity); } #ifdef CONFIG_ACPI_APEI_PCIEAER static void aer_recover_work_func(struct work_struct *work); #define AER_RECOVER_RING_ORDER 4 #define AER_RECOVER_RING_SIZE (1 << AER_RECOVER_RING_ORDER) struct aer_recover_entry { u8 bus; u8 devfn; u16 domain; int severity; struct aer_capability_regs *regs; }; static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry, AER_RECOVER_RING_SIZE); /* * Mutual exclusion for writers of aer_recover_ring, reader side don't * need lock, because there is only one reader and lock is not needed * between reader and writer. */ static DEFINE_SPINLOCK(aer_recover_ring_lock); static DECLARE_WORK(aer_recover_work, aer_recover_work_func); void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, int severity, struct aer_capability_regs *aer_regs) { unsigned long flags; struct aer_recover_entry entry = { .bus = bus, .devfn = devfn, .domain = domain, .severity = severity, .regs = aer_regs, }; spin_lock_irqsave(&aer_recover_ring_lock, flags); if (kfifo_put(&aer_recover_ring, entry)) schedule_work(&aer_recover_work); else pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n", domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); spin_unlock_irqrestore(&aer_recover_ring_lock, flags); } EXPORT_SYMBOL_GPL(aer_recover_queue); static void aer_recover_work_func(struct work_struct *work) { struct aer_recover_entry entry; struct pci_dev *pdev; while (kfifo_get(&aer_recover_ring, &entry)) { pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus, entry.devfn); if (!pdev) { pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n", entry.domain, entry.bus, PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn)); continue; } cper_print_aer(pdev, entry.severity, entry.regs); do_recovery(pdev, entry.severity); pci_dev_put(pdev); } } #endif /** * get_device_error_info - read error status from dev and store it to info * @dev: pointer to the device expected to have a error record * @info: pointer to structure to store the error record * * Return 1 on success, 0 on error. * * Note that @info is reused among all error devices. Clear fields properly. 
*/ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) { int pos, temp; /* Must reset in this function */ info->status = 0; info->tlp_header_valid = 0; pos = dev->aer_cap; /* The device might not support AER */ if (!pos) return 1; if (info->severity == AER_CORRECTABLE) { pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &info->status); pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &info->mask); if (!(info->status & ~info->mask)) return 0; } else if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || info->severity == AER_NONFATAL) { /* Link is still healthy for IO reads */ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &info->status); pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &info->mask); if (!(info->status & ~info->mask)) return 0; /* Get First Error Pointer */ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp); info->first_error = PCI_ERR_CAP_FEP(temp); if (info->status & AER_LOG_TLP_MASKS) { info->tlp_header_valid = 1; pci_read_config_dword(dev, pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0); pci_read_config_dword(dev, pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1); pci_read_config_dword(dev, pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2); pci_read_config_dword(dev, pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3); } } return 1; } static inline void aer_process_err_devices(struct pcie_device *p_device, struct aer_err_info *e_info) { int i; /* Report all before handle them, not to lost records by reset etc. */ for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { if (get_device_error_info(e_info->dev[i], e_info)) aer_print_error(e_info->dev[i], e_info); } for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { if (get_device_error_info(e_info->dev[i], e_info)) handle_error_source(p_device, e_info->dev[i], e_info); } } /** * aer_isr_one_error - consume an error detected by root port * @p_device: pointer to error root port service device * @e_src: pointer to an error source */ static void aer_isr_one_error(struct pcie_device *p_device, struct aer_err_source *e_src) { struct aer_rpc *rpc = get_service_data(p_device); struct aer_err_info *e_info = &rpc->e_info; /* * There is a possibility that both correctable error and * uncorrectable error being logged. Report correctable error first. */ if (e_src->status & PCI_ERR_ROOT_COR_RCV) { e_info->id = ERR_COR_ID(e_src->id); e_info->severity = AER_CORRECTABLE; if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV) e_info->multi_error_valid = 1; else e_info->multi_error_valid = 0; aer_print_port_info(p_device->port, e_info); if (find_source_device(p_device->port, e_info)) aer_process_err_devices(p_device, e_info); } if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) { e_info->id = ERR_UNCOR_ID(e_src->id); if (e_src->status & PCI_ERR_ROOT_FATAL_RCV) e_info->severity = AER_FATAL; else e_info->severity = AER_NONFATAL; if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV) e_info->multi_error_valid = 1; else e_info->multi_error_valid = 0; aer_print_port_info(p_device->port, e_info); if (find_source_device(p_device->port, e_info)) aer_process_err_devices(p_device, e_info); } } /** * get_e_source - retrieve an error source * @rpc: pointer to the root port which holds an error * @e_src: pointer to store retrieved error source * * Return 1 if an error source is retrieved, otherwise 0. * * Invoked by DPC handler to consume an error. 
*/ static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src) { unsigned long flags; /* Lock access to Root error producer/consumer index */ spin_lock_irqsave(&rpc->e_lock, flags); if (rpc->prod_idx == rpc->cons_idx) { spin_unlock_irqrestore(&rpc->e_lock, flags); return 0; } *e_src = rpc->e_sources[rpc->cons_idx]; rpc->cons_idx++; if (rpc->cons_idx == AER_ERROR_SOURCES_MAX) rpc->cons_idx = 0; spin_unlock_irqrestore(&rpc->e_lock, flags); return 1; } /** * aer_isr - consume errors detected by root port * @work: definition of this work item * * Invoked, as DPC, when root port records new detected error */ void aer_isr(struct work_struct *work) { struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler); struct pcie_device *p_device = rpc->rpd; struct aer_err_source uninitialized_var(e_src); mutex_lock(&rpc->rpc_mutex); while (get_e_source(rpc, &e_src)) aer_isr_one_error(p_device, &e_src); mutex_unlock(&rpc->rpc_mutex); }
null
null
null
null
96,725
12,759
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
12,759
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_UPDATE_CLIENT_REQUEST_SENDER_H_ #define COMPONENTS_UPDATE_CLIENT_REQUEST_SENDER_H_ #include <stdint.h> #include <map> #include <memory> #include <string> #include <vector> #include "base/callback.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "base/threading/thread_checker.h" #include "net/url_request/url_fetcher_delegate.h" #include "url/gurl.h" namespace client_update_protocol { class Ecdsa; } namespace net { class URLFetcher; } namespace update_client { class Configurator; // Sends a request to one of the urls provided. The class implements a chain // of responsibility design pattern, where the urls are tried in the order they // are specified, until the request to one of them succeeds or all have failed. // CUP signing is optional. class RequestSender : public net::URLFetcherDelegate { public: // If |error| is 0, then the response is provided in the |response| parameter. // |retry_after_sec| contains the value of the X-Retry-After response header, // when the response was received from a cryptographically secure URL. The // range for this value is [-1, 86400]. If |retry_after_sec| is -1 it means // that the header could not be found, or trusted, or had an invalid value. // The upper bound represents a delay of one day. using RequestSenderCallback = base::OnceCallback< void(int error, const std::string& response, int retry_after_sec)>; // This value is chosen not to conflict with network errors defined by // net/base/net_error_list.h. The callers don't have to handle this error in // any meaningful way, but this value may be reported in UMA stats, therefore // avoiding collisions with known network errors is desirable. enum : int { kErrorResponseNotTrusted = -10000 }; explicit RequestSender(scoped_refptr<Configurator> config); ~RequestSender() override; // |use_signing| enables CUP signing of protocol messages exchanged using // this class. |is_foreground| controls the presence and the value for the // X-GoogleUpdate-Interactvity header serialized in the protocol request. // If this optional parameter is set, the values of "fg" or "bg" are sent // for true or false values of this parameter. Otherwise the header is not // sent at all. void Send(const std::vector<GURL>& urls, const std::map<std::string, std::string>& request_extra_headers, const std::string& request_body, bool use_signing, RequestSenderCallback request_sender_callback); private: // Combines the |url| and |query_params| parameters. static GURL BuildUpdateUrl(const GURL& url, const std::string& query_params); // Decodes and returns the public key used by CUP. static std::string GetKey(const char* key_bytes_base64); // Returns the string value of a header of the server response or an empty // string if the header is not available. static std::string GetStringHeaderValue(const net::URLFetcher* source, const char* header_name); // Returns the integral value of a header of the server response or -1 if // if the header is not available or a conversion error has occured. static int64_t GetInt64HeaderValue(const net::URLFetcher* source, const char* header_name); // Overrides for URLFetcherDelegate. void OnURLFetchComplete(const net::URLFetcher* source) override; // Implements the error handling and url fallback mechanism. void SendInternal(); // Called when SendInternal completes. 
|response_body| and |response_etag| // contain the body and the etag associated with the HTTP response. void SendInternalComplete(int error, const std::string& response_body, const std::string& response_etag, int retry_after_sec); // Helper function to handle a non-continuable error in Send. void HandleSendError(int error, int retry_after_sec); base::ThreadChecker thread_checker_; const scoped_refptr<Configurator> config_; std::vector<GURL> urls_; std::map<std::string, std::string> request_extra_headers_; std::string request_body_; bool use_signing_; // True if CUP signing is used. RequestSenderCallback request_sender_callback_; std::string public_key_; std::vector<GURL>::const_iterator cur_url_; std::unique_ptr<net::URLFetcher> url_fetcher_; std::unique_ptr<client_update_protocol::Ecdsa> signer_; DISALLOW_COPY_AND_ASSIGN(RequestSender); }; } // namespace update_client #endif // COMPONENTS_UPDATE_CLIENT_REQUEST_SENDER_H_
null
null
null
null
9,622
345
null
train_val
c536b6be1a72aefd632d5530106a67c516cb9f4b
256,732
openssl
0
https://github.com/openssl/openssl
2016-09-22 23:12:38+01:00
/* * Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include <stdio.h> #include "internal/cryptlib.h" #include <openssl/objects.h> #include <openssl/bn.h> #include <openssl/x509v3.h> #include <openssl/ts.h> #include "ts_lcl.h" struct status_map_st { int bit; const char *text; }; static int ts_status_map_print(BIO *bio, const struct status_map_st *a, const ASN1_BIT_STRING *v); static int ts_ACCURACY_print_bio(BIO *bio, const TS_ACCURACY *accuracy); int TS_RESP_print_bio(BIO *bio, TS_RESP *a) { BIO_printf(bio, "Status info:\n"); TS_STATUS_INFO_print_bio(bio, a->status_info); BIO_printf(bio, "\nTST info:\n"); if (a->tst_info != NULL) TS_TST_INFO_print_bio(bio, a->tst_info); else BIO_printf(bio, "Not included.\n"); return 1; } int TS_STATUS_INFO_print_bio(BIO *bio, TS_STATUS_INFO *a) { static const char *status_map[] = { "Granted.", "Granted with modifications.", "Rejected.", "Waiting.", "Revocation warning.", "Revoked." }; static const struct status_map_st failure_map[] = { {TS_INFO_BAD_ALG, "unrecognized or unsupported algorithm identifier"}, {TS_INFO_BAD_REQUEST, "transaction not permitted or supported"}, {TS_INFO_BAD_DATA_FORMAT, "the data submitted has the wrong format"}, {TS_INFO_TIME_NOT_AVAILABLE, "the TSA's time source is not available"}, {TS_INFO_UNACCEPTED_POLICY, "the requested TSA policy is not supported by the TSA"}, {TS_INFO_UNACCEPTED_EXTENSION, "the requested extension is not supported by the TSA"}, {TS_INFO_ADD_INFO_NOT_AVAILABLE, "the additional information requested could not be understood " "or is not available"}, {TS_INFO_SYSTEM_FAILURE, "the request cannot be handled due to system failure"}, {-1, NULL} }; long status; int i, lines = 0; BIO_printf(bio, "Status: "); status = ASN1_INTEGER_get(a->status); if (0 <= status && status < (long)OSSL_NELEM(status_map)) BIO_printf(bio, "%s\n", status_map[status]); else BIO_printf(bio, "out of bounds\n"); BIO_printf(bio, "Status description: "); for (i = 0; i < sk_ASN1_UTF8STRING_num(a->text); ++i) { if (i > 0) BIO_puts(bio, "\t"); ASN1_STRING_print_ex(bio, sk_ASN1_UTF8STRING_value(a->text, i), 0); BIO_puts(bio, "\n"); } if (i == 0) BIO_printf(bio, "unspecified\n"); BIO_printf(bio, "Failure info: "); if (a->failure_info != NULL) lines = ts_status_map_print(bio, failure_map, a->failure_info); if (lines == 0) BIO_printf(bio, "unspecified"); BIO_printf(bio, "\n"); return 1; } static int ts_status_map_print(BIO *bio, const struct status_map_st *a, const ASN1_BIT_STRING *v) { int lines = 0; for (; a->bit >= 0; ++a) { if (ASN1_BIT_STRING_get_bit(v, a->bit)) { if (++lines > 1) BIO_printf(bio, ", "); BIO_printf(bio, "%s", a->text); } } return lines; } int TS_TST_INFO_print_bio(BIO *bio, TS_TST_INFO *a) { int v; if (a == NULL) return 0; v = ASN1_INTEGER_get(a->version); BIO_printf(bio, "Version: %d\n", v); BIO_printf(bio, "Policy OID: "); TS_OBJ_print_bio(bio, a->policy_id); TS_MSG_IMPRINT_print_bio(bio, a->msg_imprint); BIO_printf(bio, "Serial number: "); if (a->serial == NULL) BIO_printf(bio, "unspecified"); else TS_ASN1_INTEGER_print_bio(bio, a->serial); BIO_write(bio, "\n", 1); BIO_printf(bio, "Time stamp: "); ASN1_GENERALIZEDTIME_print(bio, a->time); BIO_write(bio, "\n", 1); BIO_printf(bio, "Accuracy: "); if (a->accuracy == NULL) BIO_printf(bio, "unspecified"); else 
ts_ACCURACY_print_bio(bio, a->accuracy); BIO_write(bio, "\n", 1); BIO_printf(bio, "Ordering: %s\n", a->ordering ? "yes" : "no"); BIO_printf(bio, "Nonce: "); if (a->nonce == NULL) BIO_printf(bio, "unspecified"); else TS_ASN1_INTEGER_print_bio(bio, a->nonce); BIO_write(bio, "\n", 1); BIO_printf(bio, "TSA: "); if (a->tsa == NULL) BIO_printf(bio, "unspecified"); else { STACK_OF(CONF_VALUE) *nval; if ((nval = i2v_GENERAL_NAME(NULL, a->tsa, NULL))) X509V3_EXT_val_prn(bio, nval, 0, 0); sk_CONF_VALUE_pop_free(nval, X509V3_conf_free); } BIO_write(bio, "\n", 1); TS_ext_print_bio(bio, a->extensions); return 1; } static int ts_ACCURACY_print_bio(BIO *bio, const TS_ACCURACY *a) { if (a->seconds != NULL) TS_ASN1_INTEGER_print_bio(bio, a->seconds); else BIO_printf(bio, "unspecified"); BIO_printf(bio, " seconds, "); if (a->millis != NULL) TS_ASN1_INTEGER_print_bio(bio, a->millis); else BIO_printf(bio, "unspecified"); BIO_printf(bio, " millis, "); if (a->micros != NULL) TS_ASN1_INTEGER_print_bio(bio, a->micros); else BIO_printf(bio, "unspecified"); BIO_printf(bio, " micros"); return 1; }
null
null
null
null
118,177
977
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
154,034
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/* * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "v4l2-common.h" const struct fmt_map ff_fmt_conversion_table[] = { //ff_fmt codec_id v4l2_fmt { AV_PIX_FMT_YUV420P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV420 }, { AV_PIX_FMT_YUV420P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YVU420 }, { AV_PIX_FMT_YUV422P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV422P }, { AV_PIX_FMT_YUYV422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUYV }, { AV_PIX_FMT_UYVY422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_UYVY }, { AV_PIX_FMT_YUV411P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV411P }, { AV_PIX_FMT_YUV410P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV410 }, { AV_PIX_FMT_YUV410P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YVU410 }, { AV_PIX_FMT_RGB555LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB555 }, { AV_PIX_FMT_RGB555BE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB555X }, { AV_PIX_FMT_RGB565LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB565 }, { AV_PIX_FMT_RGB565BE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB565X }, { AV_PIX_FMT_BGR24, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR24 }, { AV_PIX_FMT_RGB24, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB24 }, #ifdef V4L2_PIX_FMT_XBGR32 { AV_PIX_FMT_BGR0, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_XBGR32 }, { AV_PIX_FMT_0RGB, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_XRGB32 }, { AV_PIX_FMT_BGRA, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_ABGR32 }, { AV_PIX_FMT_ARGB, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_ARGB32 }, #endif { AV_PIX_FMT_BGR0, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR32 }, { AV_PIX_FMT_0RGB, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB32 }, { AV_PIX_FMT_GRAY8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_GREY }, #ifdef V4L2_PIX_FMT_Y16 { AV_PIX_FMT_GRAY16LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_Y16 }, #endif { AV_PIX_FMT_NV12, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_NV12 }, { AV_PIX_FMT_NONE, AV_CODEC_ID_MJPEG, V4L2_PIX_FMT_MJPEG }, { AV_PIX_FMT_NONE, AV_CODEC_ID_MJPEG, V4L2_PIX_FMT_JPEG }, #ifdef V4L2_PIX_FMT_H264 { AV_PIX_FMT_NONE, AV_CODEC_ID_H264, V4L2_PIX_FMT_H264 }, #endif #ifdef V4L2_PIX_FMT_MPEG4 { AV_PIX_FMT_NONE, AV_CODEC_ID_MPEG4, V4L2_PIX_FMT_MPEG4 }, #endif #ifdef V4L2_PIX_FMT_CPIA1 { AV_PIX_FMT_NONE, AV_CODEC_ID_CPIA, V4L2_PIX_FMT_CPIA1 }, #endif #ifdef V4L2_PIX_FMT_SRGGB8 { AV_PIX_FMT_BAYER_BGGR8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_SBGGR8 }, { AV_PIX_FMT_BAYER_GBRG8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_SGBRG8 }, { AV_PIX_FMT_BAYER_GRBG8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_SGRBG8 }, { AV_PIX_FMT_BAYER_RGGB8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_SRGGB8 }, #endif { AV_PIX_FMT_NONE, AV_CODEC_ID_NONE, 0 }, }; uint32_t ff_fmt_ff2v4l(enum AVPixelFormat pix_fmt, enum AVCodecID codec_id) { int i; for (i = 0; ff_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) { if ((codec_id == AV_CODEC_ID_NONE || ff_fmt_conversion_table[i].codec_id == codec_id) && (pix_fmt == AV_PIX_FMT_NONE || ff_fmt_conversion_table[i].ff_fmt == pix_fmt)) { return ff_fmt_conversion_table[i].v4l2_fmt; } } 
return 0; } enum AVPixelFormat ff_fmt_v4l2ff(uint32_t v4l2_fmt, enum AVCodecID codec_id) { int i; for (i = 0; ff_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) { if (ff_fmt_conversion_table[i].v4l2_fmt == v4l2_fmt && ff_fmt_conversion_table[i].codec_id == codec_id) { return ff_fmt_conversion_table[i].ff_fmt; } } return AV_PIX_FMT_NONE; } enum AVCodecID ff_fmt_v4l2codec(uint32_t v4l2_fmt) { int i; for (i = 0; ff_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) { if (ff_fmt_conversion_table[i].v4l2_fmt == v4l2_fmt) { return ff_fmt_conversion_table[i].codec_id; } } return AV_CODEC_ID_NONE; }
null
null
null
null
70,089
17,062
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
17,062
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/viz/test/test_texture.h"

#include <stddef.h>
#include <stdint.h>

#include "gpu/GLES2/gl2extchromium.h"
#include "third_party/khronos/GLES2/gl2ext.h"

namespace viz {

size_t TextureSizeBytes(const gfx::Size& size, ResourceFormat format) {
  unsigned int components_per_pixel = 4;
  unsigned int bytes_per_component = 1;
  return size.width() * size.height() * components_per_pixel *
         bytes_per_component;
}

TestTexture::TestTexture() : format(RGBA_8888) {
  // Initialize default parameter values.
  params[GL_TEXTURE_MAG_FILTER] = GL_LINEAR;
  params[GL_TEXTURE_MIN_FILTER] = GL_NEAREST_MIPMAP_LINEAR;
  params[GL_TEXTURE_WRAP_S] = GL_CLAMP_TO_EDGE;
  params[GL_TEXTURE_WRAP_T] = GL_CLAMP_TO_EDGE;
  params[GL_TEXTURE_USAGE_ANGLE] = GL_NONE;
}

TestTexture::~TestTexture() = default;

void TestTexture::Reallocate(const gfx::Size& size, ResourceFormat format) {
  this->size = size;
  this->format = format;
  this->data.reset(new uint8_t[TextureSizeBytes(size, format)]);
}

bool TestTexture::IsValidParameter(GLenum pname) {
  return params.find(pname) != params.end();
}

}  // namespace viz
null
null
null
null
13,925
52,149
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
52,149
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef MEDIA_AUDIO_AUDIO_UNITTEST_UTIL_H_
#define MEDIA_AUDIO_AUDIO_UNITTEST_UTIL_H_

#include "testing/gtest/include/gtest/gtest.h"

namespace media {

// Use in tests to either skip or fail a test when the system is missing a
// required audio device or library. If the --require-audio-hardware-for-testing
// flag is set, missing requirements will cause the test to fail. Otherwise it
// will be skipped.
#define ABORT_AUDIO_TEST_IF_NOT(requirements_satisfied)                       \
  do {                                                                        \
    bool fail = false;                                                        \
    if (ShouldAbortAudioTest(requirements_satisfied, #requirements_satisfied, \
                             &fail)) {                                        \
      if (fail)                                                               \
        FAIL();                                                               \
      else                                                                    \
        return;                                                               \
    }                                                                         \
  } while (false)

bool ShouldAbortAudioTest(bool requirements_satisfied,
                          const char* requirements_expression,
                          bool* should_fail);

}  // namespace media

#endif  // MEDIA_AUDIO_AUDIO_UNITTEST_UTIL_H_
null
null
null
null
49,012
17,826
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
17,826
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef COMPONENTS_PAYMENTS_CONTENT_PAYMENT_METHOD_MANIFEST_TABLE_H_
#define COMPONENTS_PAYMENTS_CONTENT_PAYMENT_METHOD_MANIFEST_TABLE_H_

#include <string>
#include <vector>

#include "base/macros.h"
#include "components/webdata/common/web_database_table.h"

class WebDatabase;

namespace payments {

// This class manages payment_method_manifest table in SQLite database. It
// expects the following schema.
//
// payment_method_manifest The table stores WebAppManifestSection.id of the
//                         supported web app in this payment method manifest.
//                         Note that a payment method manifest might contain
//                         multiple supported web apps ids.
//
//   expire_date           The expire date in seconds from 1601-01-01 00:00:00
//                         UTC.
//   method_name           The method name.
//   web_app_id            The supported web app id.
//                         (WebAppManifestSection.id).
//
class PaymentMethodManifestTable : public WebDatabaseTable {
 public:
  PaymentMethodManifestTable();
  ~PaymentMethodManifestTable() override;

  // Retrieves the PaymentMethodManifestTable* owned by |db|.
  static PaymentMethodManifestTable* FromWebDatabase(WebDatabase* db);

  // WebDatabaseTable:
  WebDatabaseTable::TypeKey GetTypeKey() const override;
  bool CreateTablesIfNecessary() override;
  bool IsSyncable() override;
  bool MigrateToVersion(int version, bool* update_compatible_version) override;

  // Remove expired data.
  void RemoveExpiredData();

  // Adds |payment_method|'s manifest. |web_app_ids| contains supported web
  // apps ids.
  bool AddManifest(const std::string& payment_method,
                   const std::vector<std::string>& web_app_ids);

  // Gets manifest for |payment_method|. Return empty vector if no manifest
  // exists for this method.
  std::vector<std::string> GetManifest(const std::string& payment_method);

 private:
  DISALLOW_COPY_AND_ASSIGN(PaymentMethodManifestTable);
};

}  // namespace payments

#endif  // COMPONENTS_PAYMENTS_CONTENT_PAYMENT_METHOD_MANIFEST_TABLE_H_
null
null
null
null
14,689
14,270
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
14,270
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef COMPONENTS_CRONET_ANDROID_CRONET_LIBRARY_LOADER_H_
#define COMPONENTS_CRONET_ANDROID_CRONET_LIBRARY_LOADER_H_

#include <jni.h>

namespace cronet {

jint CronetOnLoad(JavaVM* vm, void* reserved);
void CronetOnUnLoad(JavaVM* jvm, void* reserved);

}  // namespace cronet

#endif  // COMPONENTS_CRONET_ANDROID_CRONET_LIBRARY_LOADER_H_
null
null
null
null
11,133
9,275
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
174,270
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * linux/arch/arm/mach-mmp/sram.c * * based on mach-davinci/sram.c - DaVinci simple SRAM allocator * * Copyright (c) 2011 Marvell Semiconductors Inc. * All Rights Reserved * * Add for mmp sram support - Leo Yan <leoy@marvell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/genalloc.h> #include <linux/platform_data/dma-mmp_tdma.h> struct sram_bank_info { char *pool_name; struct gen_pool *gpool; int granularity; phys_addr_t sram_phys; void __iomem *sram_virt; u32 sram_size; struct list_head node; }; static DEFINE_MUTEX(sram_lock); static LIST_HEAD(sram_bank_list); struct gen_pool *sram_get_gpool(char *pool_name) { struct sram_bank_info *info = NULL; if (!pool_name) return NULL; mutex_lock(&sram_lock); list_for_each_entry(info, &sram_bank_list, node) if (!strcmp(pool_name, info->pool_name)) break; mutex_unlock(&sram_lock); if (&info->node == &sram_bank_list) return NULL; return info->gpool; } EXPORT_SYMBOL(sram_get_gpool); static int sram_probe(struct platform_device *pdev) { struct sram_platdata *pdata = pdev->dev.platform_data; struct sram_bank_info *info; struct resource *res; int ret = 0; if (!pdata || !pdata->pool_name) return -ENODEV; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "no memory resource defined\n"); ret = -ENODEV; goto out; } if (!resource_size(res)) return 0; info->sram_phys = (phys_addr_t)res->start; info->sram_size = resource_size(res); info->sram_virt = ioremap(info->sram_phys, info->sram_size); info->pool_name = kstrdup(pdata->pool_name, GFP_KERNEL); info->granularity = pdata->granularity; info->gpool = gen_pool_create(ilog2(info->granularity), -1); if (!info->gpool) { dev_err(&pdev->dev, "create pool failed\n"); ret = -ENOMEM; goto create_pool_err; } ret = gen_pool_add_virt(info->gpool, (unsigned long)info->sram_virt, info->sram_phys, info->sram_size, -1); if (ret < 0) { dev_err(&pdev->dev, "add new chunk failed\n"); ret = -ENOMEM; goto add_chunk_err; } mutex_lock(&sram_lock); list_add(&info->node, &sram_bank_list); mutex_unlock(&sram_lock); platform_set_drvdata(pdev, info); dev_info(&pdev->dev, "initialized\n"); return 0; add_chunk_err: gen_pool_destroy(info->gpool); create_pool_err: iounmap(info->sram_virt); kfree(info->pool_name); out: kfree(info); return ret; } static int sram_remove(struct platform_device *pdev) { struct sram_bank_info *info; info = platform_get_drvdata(pdev); if (info == NULL) return -ENODEV; mutex_lock(&sram_lock); list_del(&info->node); mutex_unlock(&sram_lock); gen_pool_destroy(info->gpool); iounmap(info->sram_virt); kfree(info->pool_name); kfree(info); return 0; } static const struct platform_device_id sram_id_table[] = { { "asram", MMP_ASRAM }, { "isram", MMP_ISRAM }, { } }; static struct platform_driver sram_driver = { .probe = sram_probe, .remove = sram_remove, .driver = { .name = "mmp-sram", }, .id_table = sram_id_table, }; static int __init sram_init(void) { return platform_driver_register(&sram_driver); } core_initcall(sram_init); MODULE_LICENSE("GPL");
null
null
null
null
82,617
4,808
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
4,808
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef IOS_CHROME_BROWSER_UI_FAVICON_FAVICON_ATTRIBUTES_WITH_PAYLOAD_H_
#define IOS_CHROME_BROWSER_UI_FAVICON_FAVICON_ATTRIBUTES_WITH_PAYLOAD_H_

#import "ios/chrome/browser/ui/favicon/favicon_attributes.h"

#include "components/favicon_base/favicon_types.h"

// FaviconAttributes with a payload which is not part of UI. This is to be
// created by mediator and used as a FaviconAttributes by UI elements.
@interface FaviconAttributesWithPayload : FaviconAttributes

+ (nullable instancetype)attributesWithImage:(nonnull UIImage*)image;
+ (nullable instancetype)attributesWithMonogram:(nonnull NSString*)monogram
                                      textColor:(nonnull UIColor*)textColor
                                backgroundColor:(nonnull UIColor*)backgroundColor
                         defaultBackgroundColor:(BOOL)defaultBackgroundColor;

- (nullable instancetype)init NS_UNAVAILABLE;

// Type of the icon used to create with FaviconAttributes. Only valid if the
// favicon has an image.
@property(nonatomic, assign) favicon_base::IconType iconType;

@end

#endif  // IOS_CHROME_BROWSER_UI_FAVICON_FAVICON_ATTRIBUTES_WITH_PAYLOAD_H_
null
null
null
null
1,671
5,531
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
170,526
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef _IMX_SSI_H #define _IMX_SSI_H #define SSI_STX0 0x00 #define SSI_STX1 0x04 #define SSI_SRX0 0x08 #define SSI_SRX1 0x0c #define SSI_SCR 0x10 #define SSI_SCR_CLK_IST (1 << 9) #define SSI_SCR_CLK_IST_SHIFT 9 #define SSI_SCR_TCH_EN (1 << 8) #define SSI_SCR_SYS_CLK_EN (1 << 7) #define SSI_SCR_I2S_MODE_NORM (0 << 5) #define SSI_SCR_I2S_MODE_MSTR (1 << 5) #define SSI_SCR_I2S_MODE_SLAVE (2 << 5) #define SSI_I2S_MODE_MASK (3 << 5) #define SSI_SCR_SYN (1 << 4) #define SSI_SCR_NET (1 << 3) #define SSI_SCR_RE (1 << 2) #define SSI_SCR_TE (1 << 1) #define SSI_SCR_SSIEN (1 << 0) #define SSI_SISR 0x14 #define SSI_SISR_MASK ((1 << 19) - 1) #define SSI_SISR_CMDAU (1 << 18) #define SSI_SISR_CMDDU (1 << 17) #define SSI_SISR_RXT (1 << 16) #define SSI_SISR_RDR1 (1 << 15) #define SSI_SISR_RDR0 (1 << 14) #define SSI_SISR_TDE1 (1 << 13) #define SSI_SISR_TDE0 (1 << 12) #define SSI_SISR_ROE1 (1 << 11) #define SSI_SISR_ROE0 (1 << 10) #define SSI_SISR_TUE1 (1 << 9) #define SSI_SISR_TUE0 (1 << 8) #define SSI_SISR_TFS (1 << 7) #define SSI_SISR_RFS (1 << 6) #define SSI_SISR_TLS (1 << 5) #define SSI_SISR_RLS (1 << 4) #define SSI_SISR_RFF1 (1 << 3) #define SSI_SISR_RFF0 (1 << 2) #define SSI_SISR_TFE1 (1 << 1) #define SSI_SISR_TFE0 (1 << 0) #define SSI_SIER 0x18 #define SSI_SIER_RDMAE (1 << 22) #define SSI_SIER_RIE (1 << 21) #define SSI_SIER_TDMAE (1 << 20) #define SSI_SIER_TIE (1 << 19) #define SSI_SIER_CMDAU_EN (1 << 18) #define SSI_SIER_CMDDU_EN (1 << 17) #define SSI_SIER_RXT_EN (1 << 16) #define SSI_SIER_RDR1_EN (1 << 15) #define SSI_SIER_RDR0_EN (1 << 14) #define SSI_SIER_TDE1_EN (1 << 13) #define SSI_SIER_TDE0_EN (1 << 12) #define SSI_SIER_ROE1_EN (1 << 11) #define SSI_SIER_ROE0_EN (1 << 10) #define SSI_SIER_TUE1_EN (1 << 9) #define SSI_SIER_TUE0_EN (1 << 8) #define SSI_SIER_TFS_EN (1 << 7) #define SSI_SIER_RFS_EN (1 << 6) #define SSI_SIER_TLS_EN (1 << 5) #define SSI_SIER_RLS_EN (1 << 4) #define SSI_SIER_RFF1_EN (1 << 3) #define SSI_SIER_RFF0_EN (1 << 2) #define SSI_SIER_TFE1_EN (1 << 1) #define SSI_SIER_TFE0_EN (1 << 0) #define SSI_STCR 0x1c #define SSI_STCR_TXBIT0 (1 << 9) #define SSI_STCR_TFEN1 (1 << 8) #define SSI_STCR_TFEN0 (1 << 7) #define SSI_FIFO_ENABLE_0_SHIFT 7 #define SSI_STCR_TFDIR (1 << 6) #define SSI_STCR_TXDIR (1 << 5) #define SSI_STCR_TSHFD (1 << 4) #define SSI_STCR_TSCKP (1 << 3) #define SSI_STCR_TFSI (1 << 2) #define SSI_STCR_TFSL (1 << 1) #define SSI_STCR_TEFS (1 << 0) #define SSI_SRCR 0x20 #define SSI_SRCR_RXBIT0 (1 << 9) #define SSI_SRCR_RFEN1 (1 << 8) #define SSI_SRCR_RFEN0 (1 << 7) #define SSI_FIFO_ENABLE_0_SHIFT 7 #define SSI_SRCR_RFDIR (1 << 6) #define SSI_SRCR_RXDIR (1 << 5) #define SSI_SRCR_RSHFD (1 << 4) #define SSI_SRCR_RSCKP (1 << 3) #define SSI_SRCR_RFSI (1 << 2) #define SSI_SRCR_RFSL (1 << 1) #define SSI_SRCR_REFS (1 << 0) #define SSI_SRCCR 0x28 #define SSI_SRCCR_DIV2 (1 << 18) #define SSI_SRCCR_PSR (1 << 17) #define SSI_SRCCR_WL(x) ((((x) - 2) >> 1) << 13) #define SSI_SRCCR_DC(x) (((x) & 0x1f) << 8) #define SSI_SRCCR_PM(x) (((x) & 0xff) << 0) #define SSI_SRCCR_WL_MASK (0xf << 13) #define SSI_SRCCR_DC_MASK (0x1f << 8) #define SSI_SRCCR_PM_MASK (0xff << 0) #define SSI_STCCR 0x24 #define SSI_STCCR_DIV2 (1 << 18) #define SSI_STCCR_PSR (1 << 17) #define SSI_STCCR_WL(x) ((((x) - 2) >> 1) << 13) #define SSI_STCCR_DC(x) (((x) & 0x1f) << 8) #define SSI_STCCR_PM(x) (((x) & 0xff) << 0) #define 
SSI_STCCR_WL_MASK (0xf << 13) #define SSI_STCCR_DC_MASK (0x1f << 8) #define SSI_STCCR_PM_MASK (0xff << 0) #define SSI_SFCSR 0x2c #define SSI_SFCSR_RFCNT1(x) (((x) & 0xf) << 28) #define SSI_RX_FIFO_1_COUNT_SHIFT 28 #define SSI_SFCSR_TFCNT1(x) (((x) & 0xf) << 24) #define SSI_TX_FIFO_1_COUNT_SHIFT 24 #define SSI_SFCSR_RFWM1(x) (((x) & 0xf) << 20) #define SSI_SFCSR_TFWM1(x) (((x) & 0xf) << 16) #define SSI_SFCSR_RFCNT0(x) (((x) & 0xf) << 12) #define SSI_RX_FIFO_0_COUNT_SHIFT 12 #define SSI_SFCSR_TFCNT0(x) (((x) & 0xf) << 8) #define SSI_TX_FIFO_0_COUNT_SHIFT 8 #define SSI_SFCSR_RFWM0(x) (((x) & 0xf) << 4) #define SSI_SFCSR_TFWM0(x) (((x) & 0xf) << 0) #define SSI_SFCSR_RFWM0_MASK (0xf << 4) #define SSI_SFCSR_TFWM0_MASK (0xf << 0) #define SSI_STR 0x30 #define SSI_STR_TEST (1 << 15) #define SSI_STR_RCK2TCK (1 << 14) #define SSI_STR_RFS2TFS (1 << 13) #define SSI_STR_RXSTATE(x) (((x) & 0xf) << 8) #define SSI_STR_TXD2RXD (1 << 7) #define SSI_STR_TCK2RCK (1 << 6) #define SSI_STR_TFS2RFS (1 << 5) #define SSI_STR_TXSTATE(x) (((x) & 0xf) << 0) #define SSI_SOR 0x34 #define SSI_SOR_CLKOFF (1 << 6) #define SSI_SOR_RX_CLR (1 << 5) #define SSI_SOR_TX_CLR (1 << 4) #define SSI_SOR_INIT (1 << 3) #define SSI_SOR_WAIT(x) (((x) & 0x3) << 1) #define SSI_SOR_WAIT_MASK (0x3 << 1) #define SSI_SOR_SYNRST (1 << 0) #define SSI_SACNT 0x38 #define SSI_SACNT_FRDIV(x) (((x) & 0x3f) << 5) #define SSI_SACNT_WR (1 << 4) #define SSI_SACNT_RD (1 << 3) #define SSI_SACNT_TIF (1 << 2) #define SSI_SACNT_FV (1 << 1) #define SSI_SACNT_AC97EN (1 << 0) #define SSI_SACADD 0x3c #define SSI_SACDAT 0x40 #define SSI_SATAG 0x44 #define SSI_STMSK 0x48 #define SSI_SRMSK 0x4c #define SSI_SACCST 0x50 #define SSI_SACCEN 0x54 #define SSI_SACCDIS 0x58 /* SSI clock sources */ #define IMX_SSP_SYS_CLK 0 /* SSI audio dividers */ #define IMX_SSI_TX_DIV_2 0 #define IMX_SSI_TX_DIV_PSR 1 #define IMX_SSI_TX_DIV_PM 2 #define IMX_SSI_RX_DIV_2 3 #define IMX_SSI_RX_DIV_PSR 4 #define IMX_SSI_RX_DIV_PM 5 #define DRV_NAME "imx-ssi" #include <linux/dmaengine.h> #include <linux/platform_data/dma-imx.h> #include <sound/dmaengine_pcm.h> #include "imx-pcm.h" struct imx_ssi { struct platform_device *ac97_dev; struct snd_soc_dai *imx_ac97; struct clk *clk; void __iomem *base; int irq; int fiq_enable; unsigned int offset; unsigned int flags; void (*ac97_reset) (struct snd_ac97 *ac97); void (*ac97_warm_reset)(struct snd_ac97 *ac97); struct snd_dmaengine_dai_dma_data dma_params_rx; struct snd_dmaengine_dai_dma_data dma_params_tx; struct imx_dma_data filter_data_tx; struct imx_dma_data filter_data_rx; struct imx_pcm_fiq_params fiq_params; int fiq_init; int dma_init; }; #endif /* _IMX_SSI_H */
null
null
null
null
78,873
1,312
null
train_val
83ed75feba32e46f736fcce0d96a0445f29b96c2
163,156
krb5
0
https://github.com/krb5/krb5
2016-01-27 15:43:28-05:00
/* @(#)xdr.c 2.1 88/07/29 4.0 RPCSRC */ /* * Copyright (c) 2010, Oracle America, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the "Oracle America, Inc." nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #if !defined(lint) && defined(SCCSIDS) static char sccsid[] = "@(#)xdr.c 1.35 87/08/12"; #endif /* * xdr.c, Generic XDR routines implementation. * * These are the "generic" xdr routines used to serialize and de-serialize * most common data items. See xdr.h for more info on the interface to * xdr. 
*/ #include <stdio.h> #include <string.h> #include <gssrpc/types.h> #include <gssrpc/xdr.h> /* * constants specific to the xdr "protocol" */ #define XDR_FALSE ((long) 0) #define XDR_TRUE ((long) 1) #define LASTUNSIGNED ((u_int) 0-1) #ifdef USE_VALGRIND #include <valgrind/memcheck.h> #else #define VALGRIND_CHECK_DEFINED(LVALUE) ((void)0) #define VALGRIND_CHECK_READABLE(PTR,SIZE) ((void)0) #endif /* * for unit alignment */ static char xdr_zero[BYTES_PER_XDR_UNIT] = { 0, 0, 0, 0 }; /* * Free a data structure using XDR * Not a filter, but a convenient utility nonetheless */ void xdr_free(xdrproc_t proc, void *objp) { XDR x; x.x_op = XDR_FREE; (*proc)(&x, objp); } /* * XDR nothing */ bool_t xdr_void(XDR *xdrs, void *addr) { return (TRUE); } /* * XDR integers */ bool_t xdr_int(XDR *xdrs, int *ip) { long l; switch (xdrs->x_op) { case XDR_ENCODE: VALGRIND_CHECK_DEFINED(*ip); if (*ip > 0x7fffffffL || *ip < -0x7fffffffL - 1L) return (FALSE); l = (long) *ip; return (XDR_PUTLONG(xdrs, &l)); case XDR_DECODE: if (!XDR_GETLONG(xdrs, &l)) return (FALSE); if (l > INT_MAX || l < INT_MIN) return (FALSE); *ip = (int) l; case XDR_FREE: return (TRUE); } /*NOTREACHED*/ return(FALSE); } /* * XDR unsigned integers */ bool_t xdr_u_int(XDR *xdrs, u_int *up) { u_long l; switch (xdrs->x_op) { case XDR_ENCODE: VALGRIND_CHECK_DEFINED(*up); if (*up > 0xffffffffUL) return (FALSE); l = (u_long)*up; return (XDR_PUTLONG(xdrs, (long *) &l)); case XDR_DECODE: if (!XDR_GETLONG(xdrs, (long *) &l)) return (FALSE); if ((uint32_t)l > UINT_MAX) return (FALSE); *up = (u_int) l; return (TRUE); case XDR_FREE: return (TRUE); } /*NOTREACHED*/ return(FALSE); } /* * XDR long integers */ bool_t xdr_long(XDR *xdrs, long *lp) { switch (xdrs->x_op) { case XDR_ENCODE: VALGRIND_CHECK_DEFINED(*lp); if (*lp > 0x7fffffffL || *lp < -0x7fffffffL - 1L) return (FALSE); return (XDR_PUTLONG(xdrs, lp)); case XDR_DECODE: return (XDR_GETLONG(xdrs, lp)); case XDR_FREE: return (TRUE); } return (FALSE); } /* * XDR unsigned long integers */ bool_t xdr_u_long(XDR *xdrs, u_long *ulp) { switch (xdrs->x_op) { case XDR_ENCODE: VALGRIND_CHECK_DEFINED(*ulp); if (*ulp > 0xffffffffUL) return (FALSE); return (XDR_PUTLONG(xdrs, (long *) ulp)); case XDR_DECODE: return (XDR_GETLONG(xdrs, (long *) ulp)); case XDR_FREE: return (TRUE); } return (FALSE); } /* * XDR short integers */ bool_t xdr_short(register XDR *xdrs, short *sp) { long l; switch (xdrs->x_op) { case XDR_ENCODE: VALGRIND_CHECK_DEFINED(*sp); l = (long) *sp; return (XDR_PUTLONG(xdrs, &l)); case XDR_DECODE: if (!XDR_GETLONG(xdrs, &l)) { return (FALSE); } if (l > SHRT_MAX || l < SHRT_MIN) return (FALSE); *sp = (short) l; return (TRUE); case XDR_FREE: return (TRUE); } return (FALSE); } /* * XDR unsigned short integers */ bool_t xdr_u_short(register XDR *xdrs, u_short *usp) { u_long l; switch (xdrs->x_op) { case XDR_ENCODE: VALGRIND_CHECK_DEFINED(*usp); l = (u_long) *usp; return (XDR_PUTLONG(xdrs, (long *) &l)); case XDR_DECODE: if (!XDR_GETLONG(xdrs, (long *) &l)) { return (FALSE); } *usp = (u_short) l; return (TRUE); case XDR_FREE: return (TRUE); } return (FALSE); } /* * XDR a char */ bool_t xdr_char(XDR *xdrs, char *cp) { int i; switch (xdrs->x_op) { case XDR_ENCODE: VALGRIND_CHECK_DEFINED(*cp); break; default: break; } i = (*cp); if (!xdr_int(xdrs, &i)) { return (FALSE); } *cp = i; return (TRUE); } /* * XDR an unsigned char */ bool_t xdr_u_char(XDR *xdrs, u_char *cp) { u_int u; switch (xdrs->x_op) { case XDR_ENCODE: VALGRIND_CHECK_DEFINED(*cp); break; default: break; } u = (*cp); if (!xdr_u_int(xdrs, &u)) { return 
(FALSE); } *cp = u; return (TRUE); } /* * XDR booleans */ bool_t xdr_bool(register XDR *xdrs, bool_t *bp) { long lb; switch (xdrs->x_op) { case XDR_ENCODE: VALGRIND_CHECK_DEFINED(*bp); lb = *bp ? XDR_TRUE : XDR_FALSE; return (XDR_PUTLONG(xdrs, &lb)); case XDR_DECODE: if (!XDR_GETLONG(xdrs, &lb)) { return (FALSE); } *bp = (lb == XDR_FALSE) ? FALSE : TRUE; return (TRUE); case XDR_FREE: return (TRUE); } return (FALSE); } /* * XDR enumerations */ bool_t xdr_enum(XDR *xdrs, enum_t *ep) { #ifndef lint enum sizecheck { SIZEVAL }; /* used to find the size of an enum */ /* * enums are treated as ints */ switch (xdrs->x_op) { case XDR_ENCODE: VALGRIND_CHECK_DEFINED(*ep); break; default: break; } if (sizeof (enum sizecheck) == sizeof (long)) { return (xdr_long(xdrs, (long *)ep)); } else if (sizeof (enum sizecheck) == sizeof (int)) { return (xdr_int(xdrs, (int *)ep)); } else if (sizeof (enum sizecheck) == sizeof (short)) { return (xdr_short(xdrs, (short *)ep)); } else { return (FALSE); } #else (void) (xdr_short(xdrs, (short *)ep)); return (xdr_long(xdrs, (long *)ep)); #endif } /* * XDR opaque data * Allows the specification of a fixed size sequence of opaque bytes. * cp points to the opaque object and cnt gives the byte length. */ bool_t xdr_opaque(XDR *xdrs, caddr_t cp, u_int cnt) { register u_int rndup; static int crud[BYTES_PER_XDR_UNIT]; /* * if no data we are done */ if (cnt == 0) return (TRUE); /* * round byte count to full xdr units */ rndup = cnt % BYTES_PER_XDR_UNIT; if (rndup > 0) rndup = BYTES_PER_XDR_UNIT - rndup; if (xdrs->x_op == XDR_DECODE) { if (!XDR_GETBYTES(xdrs, cp, cnt)) { return (FALSE); } if (rndup == 0) return (TRUE); return (XDR_GETBYTES(xdrs, (caddr_t) (void *)crud, rndup)); } if (xdrs->x_op == XDR_ENCODE) { VALGRIND_CHECK_READABLE((volatile void *)cp, cnt); if (!XDR_PUTBYTES(xdrs, cp, cnt)) { return (FALSE); } if (rndup == 0) return (TRUE); return (XDR_PUTBYTES(xdrs, xdr_zero, rndup)); } if (xdrs->x_op == XDR_FREE) { return (TRUE); } return (FALSE); } /* * XDR counted bytes * *cpp is a pointer to the bytes, *sizep is the count. * If *cpp is NULL maxsize bytes are allocated */ bool_t xdr_bytes( XDR *xdrs, char **cpp, u_int *sizep, u_int maxsize) { register char *sp = *cpp; /* sp is the actual string pointer */ register u_int nodesize; /* * first deal with the length since xdr bytes are counted */ if (! xdr_u_int(xdrs, sizep)) { return (FALSE); } nodesize = *sizep; if ((nodesize > maxsize) && (xdrs->x_op != XDR_FREE)) { return (FALSE); } /* * now deal with the actual bytes */ switch (xdrs->x_op) { case XDR_DECODE: if (nodesize == 0) { return (TRUE); } if (sp == NULL) { *cpp = sp = (char *)mem_alloc(nodesize); } if (sp == NULL) { (void) fprintf(stderr, "xdr_bytes: out of memory\n"); return (FALSE); } /* fall into ... */ case XDR_ENCODE: return (xdr_opaque(xdrs, sp, nodesize)); case XDR_FREE: if (sp != NULL) { mem_free(sp, nodesize); *cpp = NULL; } return (TRUE); } return (FALSE); } /* * Implemented here due to commonality of the object. 
*/ bool_t xdr_netobj(XDR *xdrs, struct netobj *np) { return (xdr_bytes(xdrs, &np->n_bytes, &np->n_len, MAX_NETOBJ_SZ)); } bool_t xdr_int32(XDR *xdrs, int32_t *ip) { long l; switch (xdrs->x_op) { case XDR_ENCODE: VALGRIND_CHECK_DEFINED(*ip); l = *ip; return (xdr_long(xdrs, &l)); case XDR_DECODE: if (!xdr_long(xdrs, &l)) { return (FALSE); } *ip = l; return (TRUE); case XDR_FREE: return (TRUE); } return (FALSE); } bool_t xdr_u_int32(XDR *xdrs, uint32_t *up) { u_long ul; switch (xdrs->x_op) { case XDR_ENCODE: VALGRIND_CHECK_DEFINED(*up); ul = *up; return (xdr_u_long(xdrs, &ul)); case XDR_DECODE: if (!xdr_u_long(xdrs, &ul)) { return (FALSE); } *up = ul; return (TRUE); case XDR_FREE: return (TRUE); } return (FALSE); } /* * XDR a descriminated union * Support routine for discriminated unions. * You create an array of xdrdiscrim structures, terminated with * an entry with a null procedure pointer. The routine gets * the discriminant value and then searches the array of xdrdiscrims * looking for that value. It calls the procedure given in the xdrdiscrim * to handle the discriminant. If there is no specific routine a default * routine may be called. * If there is no specific or default routine an error is returned. */ bool_t xdr_union( XDR *xdrs, enum_t *dscmp, /* enum to decide which arm to work on */ char *unp, /* the union itself */ struct xdr_discrim *choices, /* [value, xdr proc] for each arm */ xdrproc_t dfault /* default xdr routine */ ) { register enum_t dscm; /* * we deal with the discriminator; it's an enum */ if (! xdr_enum(xdrs, dscmp)) { return (FALSE); } dscm = *dscmp; /* * search choices for a value that matches the discriminator. * if we find one, execute the xdr routine for that value. */ for (; choices->proc != NULL_xdrproc_t; choices++) { if (choices->value == dscm) return ((*(choices->proc))(xdrs, unp, LASTUNSIGNED)); } /* * no match - execute the default xdr routine if there is one */ return ((dfault == NULL_xdrproc_t) ? FALSE : (*dfault)(xdrs, unp, LASTUNSIGNED)); } /* * Non-portable xdr primitives. * Care should be taken when moving these routines to new architectures. */ /* * XDR null terminated ASCII strings * xdr_string deals with "C strings" - arrays of bytes that are * terminated by a NULL character. The parameter cpp references a * pointer to storage; If the pointer is null, then the necessary * storage is allocated. The last parameter is the max allowed length * of the string as specified by a protocol. */ bool_t xdr_string(XDR *xdrs, char **cpp, u_int maxsize) { register char *sp = *cpp; /* sp is the actual string pointer */ u_int size; u_int nodesize; /* * first deal with the length since xdr strings are counted-strings */ switch (xdrs->x_op) { case XDR_FREE: if (sp == NULL) { return(TRUE); /* already free */ } /* fall through... */ case XDR_ENCODE: size = strlen(sp); break; case XDR_DECODE: break; } if (! xdr_u_int(xdrs, &size)) { return (FALSE); } if (size >= maxsize) { return (FALSE); } nodesize = size + 1; /* * now deal with the actual bytes */ switch (xdrs->x_op) { case XDR_DECODE: if (nodesize == 0) { return (TRUE); } if (sp == NULL) *cpp = sp = (char *)mem_alloc(nodesize); if (sp == NULL) { (void) fprintf(stderr, "xdr_string: out of memory\n"); return (FALSE); } sp[size] = 0; /* fall into ... 
*/ case XDR_ENCODE: return (xdr_opaque(xdrs, sp, size)); case XDR_FREE: mem_free(sp, nodesize); *cpp = NULL; return (TRUE); } return (FALSE); } /* * Wrapper for xdr_string that can be called directly from * routines like clnt_call */ bool_t xdr_wrapstring(XDR *xdrs, char **cpp) { if (xdr_string(xdrs, cpp, LASTUNSIGNED)) { return (TRUE); } return (FALSE); }
null
null
null
null
74,464
587
9
train_val
b22f5126a24b3b2f15448c3f2a254fc10cbc2b92
165,582
linux
1
https://github.com/torvalds/linux
2014-01-06 17:40:02+01:00
static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
                     unsigned int dataoff, unsigned int *timeouts)
{
        struct net *net = nf_ct_net(ct);
        struct dccp_net *dn;
        struct dccp_hdr _dh, *dh;
        const char *msg;
        u_int8_t state;

        dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
        BUG_ON(dh == NULL);

        state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
        switch (state) {
        default:
                dn = dccp_pernet(net);
                if (dn->dccp_loose == 0) {
                        msg = "nf_ct_dccp: not picking up existing connection ";
                        goto out_invalid;
                }
        case CT_DCCP_REQUEST:
                break;
        case CT_DCCP_INVALID:
                msg = "nf_ct_dccp: invalid state transition ";
                goto out_invalid;
        }

        ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
        ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
        ct->proto.dccp.state = CT_DCCP_NONE;
        ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST;
        ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL;
        ct->proto.dccp.handshake_seq = 0;
        return true;

out_invalid:
        if (LOG_INVALID(net, IPPROTO_DCCP))
                nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL,
                              "%s", msg);
        return false;
}
CVE-2014-2523
CWE-20
https://github.com/torvalds/linux/commit/b22f5126a24b3b2f15448c3f2a254fc10cbc2b92
Low
3,476
14,378
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
14,378
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/ntp_snippets/remote/prefetched_pages_tracker_impl.h" #include "base/bind.h" #include "base/strings/string_number_conversions.h" #include "base/strings/stringprintf.h" #include "base/test/mock_callback.h" #include "components/ntp_snippets/offline_pages/offline_pages_test_utils.h" #include "components/offline_pages/core/client_namespace_constants.h" #include "components/offline_pages/core/stub_offline_page_model.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" using ntp_snippets::test::FakeOfflinePageModel; using offline_pages::MultipleOfflinePageItemCallback; using offline_pages::OfflinePageItem; using testing::_; using testing::Eq; using testing::SaveArg; using testing::StrictMock; namespace ntp_snippets { namespace { const int64_t kSystemDownloadId = 0; class MockOfflinePageModel : public offline_pages::StubOfflinePageModel { public: ~MockOfflinePageModel() override = default; MOCK_METHOD2(GetPagesByNamespace, void(const std::string& name_space, const MultipleOfflinePageItemCallback& callback)); }; OfflinePageItem CreateOfflinePageItem(const GURL& url, const std::string& name_space) { static int id = 0; ++id; return OfflinePageItem( url, id, offline_pages::ClientId(name_space, base::IntToString(id)), base::FilePath::FromUTF8Unsafe( base::StringPrintf("some/folder/%d.mhtml", id)), 0, base::Time::Now()); } } // namespace class PrefetchedPagesTrackerImplTest : public ::testing::Test { public: PrefetchedPagesTrackerImplTest() = default; FakeOfflinePageModel* fake_offline_page_model() { return &fake_offline_page_model_; } MockOfflinePageModel* mock_offline_page_model() { return &mock_offline_page_model_; } private: FakeOfflinePageModel fake_offline_page_model_; StrictMock<MockOfflinePageModel> mock_offline_page_model_; DISALLOW_COPY_AND_ASSIGN(PrefetchedPagesTrackerImplTest); }; TEST_F(PrefetchedPagesTrackerImplTest, ShouldRetrievePrefetchedEarlierSuggestionsOnInitialize) { (*fake_offline_page_model()->mutable_items()) = { CreateOfflinePageItem(GURL("http://prefetched.com"), offline_pages::kSuggestedArticlesNamespace)}; PrefetchedPagesTrackerImpl tracker(fake_offline_page_model()); tracker.Initialize(base::BindOnce([] {})); ASSERT_FALSE( tracker.PrefetchedOfflinePageExists(GURL("http://not_added_url.com"))); EXPECT_TRUE( tracker.PrefetchedOfflinePageExists(GURL("http://prefetched.com"))); } TEST_F(PrefetchedPagesTrackerImplTest, ShouldAddNewPrefetchedPagesWhenNotified) { fake_offline_page_model()->mutable_items()->clear(); PrefetchedPagesTrackerImpl tracker(fake_offline_page_model()); tracker.Initialize(base::BindOnce([] {})); ASSERT_FALSE( tracker.PrefetchedOfflinePageExists(GURL("http://prefetched.com"))); tracker.OfflinePageAdded( fake_offline_page_model(), CreateOfflinePageItem(GURL("http://prefetched.com"), offline_pages::kSuggestedArticlesNamespace)); EXPECT_TRUE( tracker.PrefetchedOfflinePageExists(GURL("http://prefetched.com"))); } TEST_F(PrefetchedPagesTrackerImplTest, ShouldIgnoreOtherTypesOfOfflinePagesWhenNotified) { fake_offline_page_model()->mutable_items()->clear(); PrefetchedPagesTrackerImpl tracker(fake_offline_page_model()); tracker.Initialize(base::BindOnce([] {})); ASSERT_FALSE(tracker.PrefetchedOfflinePageExists( GURL("http://manually_downloaded.com"))); tracker.OfflinePageAdded( fake_offline_page_model(), 
CreateOfflinePageItem(GURL("http://manually_downloaded.com"), offline_pages::kNTPSuggestionsNamespace)); EXPECT_FALSE(tracker.PrefetchedOfflinePageExists( GURL("http://manually_downloaded.com"))); } TEST_F(PrefetchedPagesTrackerImplTest, ShouldIgnoreOtherTypesOfOfflinePagesOnStartup) { (*fake_offline_page_model()->mutable_items()) = { CreateOfflinePageItem(GURL("http://manually_downloaded.com"), offline_pages::kNTPSuggestionsNamespace)}; PrefetchedPagesTrackerImpl tracker(fake_offline_page_model()); tracker.Initialize(base::BindOnce([] {})); ASSERT_FALSE(tracker.PrefetchedOfflinePageExists( GURL("http://manually_downloaded.com"))); EXPECT_FALSE(tracker.PrefetchedOfflinePageExists( GURL("http://manually_downloaded.com"))); } TEST_F(PrefetchedPagesTrackerImplTest, ShouldDeletePrefetchedURLWhenNotified) { const OfflinePageItem item = CreateOfflinePageItem(GURL("http://prefetched.com"), offline_pages::kSuggestedArticlesNamespace); (*fake_offline_page_model()->mutable_items()) = {item}; PrefetchedPagesTrackerImpl tracker(fake_offline_page_model()); tracker.Initialize(base::BindOnce([] {})); ASSERT_TRUE( tracker.PrefetchedOfflinePageExists(GURL("http://prefetched.com"))); tracker.OfflinePageDeleted(offline_pages::OfflinePageModel::DeletedPageInfo( item.offline_id, kSystemDownloadId, item.client_id, /*request_origin=*/"")); EXPECT_FALSE( tracker.PrefetchedOfflinePageExists(GURL("http://prefetched.com"))); } TEST_F(PrefetchedPagesTrackerImplTest, ShouldIgnoreDeletionOfOtherTypeOfflinePagesWhenNotified) { const OfflinePageItem prefetched_item = CreateOfflinePageItem(GURL("http://prefetched.com"), offline_pages::kSuggestedArticlesNamespace); // The URL is intentionally the same. const OfflinePageItem manually_downloaded_item = CreateOfflinePageItem( GURL("http://prefetched.com"), offline_pages::kNTPSuggestionsNamespace); (*fake_offline_page_model()->mutable_items()) = {prefetched_item, manually_downloaded_item}; PrefetchedPagesTrackerImpl tracker(fake_offline_page_model()); tracker.Initialize(base::BindOnce([] {})); ASSERT_TRUE( tracker.PrefetchedOfflinePageExists(GURL("http://prefetched.com"))); tracker.OfflinePageDeleted(offline_pages::OfflinePageModel::DeletedPageInfo( manually_downloaded_item.offline_id, kSystemDownloadId, manually_downloaded_item.client_id, /*request_origin=*/"")); EXPECT_TRUE( tracker.PrefetchedOfflinePageExists(GURL("http://prefetched.com"))); } TEST_F(PrefetchedPagesTrackerImplTest, ShouldReportAsNotInitializedBeforeReceivedArticles) { EXPECT_CALL( *mock_offline_page_model(), GetPagesByNamespace(offline_pages::kSuggestedArticlesNamespace, _)); PrefetchedPagesTrackerImpl tracker(mock_offline_page_model()); tracker.Initialize(base::BindOnce([] {})); EXPECT_FALSE(tracker.IsInitialized()); } TEST_F(PrefetchedPagesTrackerImplTest, ShouldReportAsInitializedAfterInitialization) { MultipleOfflinePageItemCallback offline_pages_callback; EXPECT_CALL( *mock_offline_page_model(), GetPagesByNamespace(offline_pages::kSuggestedArticlesNamespace, _)) .WillOnce(SaveArg<1>(&offline_pages_callback)); PrefetchedPagesTrackerImpl tracker(mock_offline_page_model()); tracker.Initialize(base::BindOnce([] {})); ASSERT_FALSE(tracker.IsInitialized()); offline_pages_callback.Run(std::vector<OfflinePageItem>()); EXPECT_TRUE(tracker.IsInitialized()); } TEST_F(PrefetchedPagesTrackerImplTest, ShouldCallCallbackAfterInitialization) { MultipleOfflinePageItemCallback offline_pages_callback; EXPECT_CALL( *mock_offline_page_model(), GetPagesByNamespace(offline_pages::kSuggestedArticlesNamespace, _)) 
.WillOnce(SaveArg<1>(&offline_pages_callback)); PrefetchedPagesTrackerImpl tracker(mock_offline_page_model()); base::MockCallback<base::OnceCallback<void()>> mock_initialization_completed_callback; tracker.Initialize(mock_initialization_completed_callback.Get()); EXPECT_CALL(mock_initialization_completed_callback, Run()); offline_pages_callback.Run(std::vector<OfflinePageItem>()); } TEST_F(PrefetchedPagesTrackerImplTest, ShouldCallMultipleCallbacksAfterInitialization) { MultipleOfflinePageItemCallback offline_pages_callback; EXPECT_CALL( *mock_offline_page_model(), GetPagesByNamespace(offline_pages::kSuggestedArticlesNamespace, _)) .WillOnce(SaveArg<1>(&offline_pages_callback)); PrefetchedPagesTrackerImpl tracker(mock_offline_page_model()); base::MockCallback<base::OnceCallback<void()>> first_mock_initialization_completed_callback, second_mock_initialization_completed_callback; tracker.Initialize(first_mock_initialization_completed_callback.Get()); tracker.Initialize(second_mock_initialization_completed_callback.Get()); EXPECT_CALL(first_mock_initialization_completed_callback, Run()); EXPECT_CALL(second_mock_initialization_completed_callback, Run()); offline_pages_callback.Run(std::vector<OfflinePageItem>()); } TEST_F(PrefetchedPagesTrackerImplTest, ShouldCallCallbackImmediatelyIfAlreadyInitialiazed) { MultipleOfflinePageItemCallback offline_pages_callback; EXPECT_CALL( *mock_offline_page_model(), GetPagesByNamespace(offline_pages::kSuggestedArticlesNamespace, _)) .WillOnce(SaveArg<1>(&offline_pages_callback)); PrefetchedPagesTrackerImpl tracker(mock_offline_page_model()); tracker.Initialize(base::BindOnce([] {})); offline_pages_callback.Run(std::vector<OfflinePageItem>()); base::MockCallback<base::OnceCallback<void()>> mock_initialization_completed_callback; EXPECT_CALL(mock_initialization_completed_callback, Run()); tracker.Initialize(mock_initialization_completed_callback.Get()); } TEST_F(PrefetchedPagesTrackerImplTest, ShouldKeepPrefetchedURLAfterDuplicatePageDeleted) { const OfflinePageItem first_item = CreateOfflinePageItem(GURL("http://prefetched.com"), offline_pages::kSuggestedArticlesNamespace); const OfflinePageItem second_item = CreateOfflinePageItem(GURL("http://prefetched.com"), offline_pages::kSuggestedArticlesNamespace); (*fake_offline_page_model()->mutable_items()) = {first_item, second_item}; PrefetchedPagesTrackerImpl tracker(fake_offline_page_model()); tracker.Initialize(base::BindOnce([] {})); ASSERT_TRUE( tracker.PrefetchedOfflinePageExists(GURL("http://prefetched.com"))); tracker.OfflinePageDeleted(offline_pages::OfflinePageModel::DeletedPageInfo( first_item.offline_id, kSystemDownloadId, first_item.client_id, /*request_origin=*/"")); // Only one offline page (out of two) has been removed, the remaining one // should be reported here. 
EXPECT_TRUE( tracker.PrefetchedOfflinePageExists(GURL("http://prefetched.com"))); } TEST_F(PrefetchedPagesTrackerImplTest, ShouldDeletePrefetchedURLAfterAllItsPagesAreDeleted) { const OfflinePageItem first_item = CreateOfflinePageItem(GURL("http://prefetched.com"), offline_pages::kSuggestedArticlesNamespace); const OfflinePageItem second_item = CreateOfflinePageItem(GURL("http://prefetched.com"), offline_pages::kSuggestedArticlesNamespace); (*fake_offline_page_model()->mutable_items()) = {first_item, second_item}; PrefetchedPagesTrackerImpl tracker(fake_offline_page_model()); tracker.Initialize(base::BindOnce([] {})); ASSERT_TRUE( tracker.PrefetchedOfflinePageExists(GURL("http://prefetched.com"))); tracker.OfflinePageDeleted(offline_pages::OfflinePageModel::DeletedPageInfo( first_item.offline_id, kSystemDownloadId, first_item.client_id, /*request_origin=*/"")); ASSERT_TRUE( tracker.PrefetchedOfflinePageExists(GURL("http://prefetched.com"))); tracker.OfflinePageDeleted(offline_pages::OfflinePageModel::DeletedPageInfo( second_item.offline_id, kSystemDownloadId, second_item.client_id, /*request_origin=*/"")); // All offline pages have been removed, their absence should be reported here. EXPECT_FALSE( tracker.PrefetchedOfflinePageExists(GURL("http://prefetched.com"))); } } // namespace ntp_snippets
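The tracker tests above repeatedly capture the callback handed to the mocked OfflinePageModel with SaveArg and run it later to simulate the asynchronous reply. Below is a minimal, self-contained sketch of that capture-and-run pattern using plain googletest/googlemock; PageModel, Tracker, and the callback type are illustrative stand-ins, not types from the file above.

// Capture-and-run sketch: the test holds on to the callback the code under
// test gives its dependency, then invokes it to simulate the async reply.
#include <cstddef>
#include <functional>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"

using PageListCallback = std::function<void(std::vector<std::string>)>;

// Hypothetical dependency; stands in for the offline page model.
class PageModel {
 public:
  virtual ~PageModel() = default;
  virtual void GetPages(PageListCallback callback) = 0;
};

class MockPageModel : public PageModel {
 public:
  MOCK_METHOD(void, GetPages, (PageListCallback callback), (override));
};

// Hypothetical class under test; stands in for the tracker.
class Tracker {
 public:
  explicit Tracker(PageModel* model) : model_(model) {}
  void Initialize() {
    model_->GetPages([this](std::vector<std::string> pages) {
      page_count_ = pages.size();
      initialized_ = true;
    });
  }
  bool IsInitialized() const { return initialized_; }
  std::size_t page_count() const { return page_count_; }

 private:
  PageModel* model_;
  bool initialized_ = false;
  std::size_t page_count_ = 0;
};

TEST(TrackerSketchTest, ReportsInitializedOnlyAfterCallbackRuns) {
  MockPageModel model;
  PageListCallback captured;
  // Capture the callback instead of answering immediately.
  EXPECT_CALL(model, GetPages(testing::_))
      .WillOnce(testing::SaveArg<0>(&captured));

  Tracker tracker(&model);
  tracker.Initialize();
  EXPECT_FALSE(tracker.IsInitialized());  // Reply has not arrived yet.

  captured({"http://prefetched.com"});    // Simulate the async reply.
  EXPECT_TRUE(tracker.IsInitialized());
  EXPECT_EQ(1u, tracker.page_count());
}

Deferring the reply this way is what lets the tests above assert the not-yet-initialized state before the model's callback fires.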
null
null
null
null
11,241
39,639
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
204,634
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Scheduler code and data structures related to cpufreq. * * Copyright (C) 2016, Intel Corporation * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include "sched.h" DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data); /** * cpufreq_add_update_util_hook - Populate the CPU's update_util_data pointer. * @cpu: The CPU to set the pointer for. * @data: New pointer value. * @func: Callback function to set for the CPU. * * Set and publish the update_util_data pointer for the given CPU. * * The update_util_data pointer of @cpu is set to @data and the callback * function pointer in the target struct update_util_data is set to @func. * That function will be called by cpufreq_update_util() from RCU-sched * read-side critical sections, so it must not sleep. @data will always be * passed to it as the first argument which allows the function to get to the * target update_util_data structure and its container. * * The update_util_data pointer of @cpu must be NULL when this function is * called or it will WARN() and return with no effect. */ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, void (*func)(struct update_util_data *data, u64 time, unsigned int flags)) { if (WARN_ON(!data || !func)) return; if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu))) return; data->func = func; rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data); } EXPORT_SYMBOL_GPL(cpufreq_add_update_util_hook); /** * cpufreq_remove_update_util_hook - Clear the CPU's update_util_data pointer. * @cpu: The CPU to clear the pointer for. * * Clear the update_util_data pointer for the given CPU. * * Callers must use RCU-sched callbacks to free any memory that might be * accessed via the old update_util_data pointer or invoke synchronize_sched() * right after this function to avoid use-after-free. */ void cpufreq_remove_update_util_hook(int cpu) { rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL); } EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook);
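The hook registration above fills in data->func before publishing the pointer with rcu_assign_pointer, so a concurrent reader never observes a half-initialized hook. The following is a userspace C++ analogy of that publish/consume ordering, not kernel code: std::atomic release/acquire stands in for rcu_assign_pointer and the RCU read side, and it deliberately ignores the reclamation step the kernel handles with synchronize_sched.

// Userspace analogy: initialize the structure first, then publish the pointer.
#include <atomic>
#include <cstdio>

struct UpdateUtilData {
  void (*func)(UpdateUtilData* data, unsigned long long time) = nullptr;
};

std::atomic<UpdateUtilData*> g_hook{nullptr};

void AddHook(UpdateUtilData* data,
             void (*func)(UpdateUtilData*, unsigned long long)) {
  data->func = func;                              // initialize first
  g_hook.store(data, std::memory_order_release);  // then publish
}

void RemoveHook() { g_hook.store(nullptr, std::memory_order_release); }

// Reader side, e.g. called from a hot path.
void MaybeCallHook(unsigned long long now) {
  UpdateUtilData* data = g_hook.load(std::memory_order_acquire);
  if (data && data->func)
    data->func(data, now);
}

int main() {
  static UpdateUtilData hook;
  AddHook(&hook, [](UpdateUtilData*, unsigned long long t) {
    std::printf("hook called at %llu\n", t);
  });
  MaybeCallHook(42);
  RemoveHook();
  MaybeCallHook(43);  // no-op after removal
  return 0;
}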
null
null
null
null
112,981
57,531
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
57,531
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/chromeos/input_method/candidate_window_controller.h" #include "chrome/browser/chromeos/input_method/candidate_window_controller_impl.h" namespace chromeos { namespace input_method { // static CandidateWindowController* CandidateWindowController::CreateCandidateWindowController() { return new CandidateWindowControllerImpl; } } // namespace input_method } // namespace chromeos
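The file above exists mainly so that callers of CreateCandidateWindowController never need to include the Impl header. A small hypothetical sketch of the same create-behind-an-interface idiom (all names below are made up):

#include <iostream>
#include <memory>

class Widget {
 public:
  virtual ~Widget() = default;
  virtual void Show() = 0;
  // Callers see only this declaration; the concrete type stays hidden.
  static Widget* Create();
};

namespace {
class WidgetImpl : public Widget {
 public:
  void Show() override { std::cout << "showing\n"; }
};
}  // namespace

Widget* Widget::Create() { return new WidgetImpl; }

int main() {
  std::unique_ptr<Widget> w(Widget::Create());
  w->Show();
  return 0;
}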
null
null
null
null
54,394
1,425
null
train_val
83ed75feba32e46f736fcce0d96a0445f29b96c2
163,269
krb5
0
https://github.com/krb5/krb5
2016-01-27 15:43:28-05:00
/* lib/crypto/crypto_tests/camellia-test.c */ /* * Copyright (c) 2009 * NTT (Nippon Telegraph and Telephone Corporation) . All rights reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. * * * Subset of NIST tests for AES as applied to Camellia; specifically, the * variable-key and variable-text tests for 128- and 256-bit keys. */ #include <stdio.h> #include "crypto_int.h" static char key[32]; static char plain[16], cipher[16], zero[16]; static krb5_keyblock enc_key; static krb5_data ivec; static void init() { enc_key.contents = (unsigned char *)key; enc_key.length = 16; ivec.data = zero; ivec.length = 16; } static void enc() { krb5_key k; krb5_crypto_iov iov; krb5_data cdata = make_data(cipher, 16); iov.flags = KRB5_CRYPTO_TYPE_DATA; iov.data = make_data(plain, 16); krb5_k_create_key(NULL, &enc_key, &k); /* cbc-mac is the same as block encryption for a single block. */ krb5int_camellia_cbc_mac(k, &iov, 1, &ivec, &cdata); krb5_k_free_key(NULL, k); } static void hexdump(const char *label, const char *cp, int len) { printf("%s=", label); while (len--) printf("%02X", 0xff & *cp++); printf("\n"); } static void set_bit(char *ptr, int bitnum) { int bytenum; bytenum = bitnum / 8; bitnum %= 8; /* First bit is the high bit! */ ptr[bytenum] = 1 << (7 - bitnum); } /* Variable-Key tests */ static void vk_test_1(int len) { int i; enc_key.length = len; printf("\nKEYSIZE=%d\n\n", len * 8); memset(plain, 0, sizeof(plain)); hexdump("PT", plain, 16); for (i = 0; i < len * 8; i++) { memset(key, 0, len); set_bit(key, i); printf("\nI=%d\n", i+1); hexdump("KEY", key, len); enc(); hexdump("CT", cipher, 16); } printf("\n==========\n"); } static void vk_test() { vk_test_1(16); vk_test_1(32); } /* Variable-Text tests */ static void vt_test_1(int len) { int i; enc_key.length = len; printf("\nKEYSIZE=%d\n\n", len * 8); memset(key, 0, len); hexdump("KEY", key, len); for (i = 0; i < 16 * 8; i++) { memset(plain, 0, sizeof(plain)); set_bit(plain, i); printf("\nI=%d\n", i+1); hexdump("PT", plain, 16); enc(); hexdump("CT", cipher, 16); } printf("\n==========\n"); } static void vt_test() { vt_test_1(16); vt_test_1(32); } int main (int argc, char *argv[]) { if (argc > 2 || (argc == 2 && strcmp(argv[1], "-k"))) { fprintf(stderr, "usage:\t%s -k\tfor variable-key tests\n" " or:\t%s \tfor variable-plaintext tests\n", argv[0], argv[0]); return 1; } init(); if (argc == 2) vk_test(); else vt_test(); return 0; }
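set_bit above numbers bits from the most significant bit of the first byte, as its comment notes, which is how the X9.62/NIST-style variable-key and variable-text vectors index bit I. A tiny standalone C++ check of that convention (the helper is re-declared here so the snippet compiles on its own):

// Bit I=1 in the test vectors corresponds to index 0 here: the top bit of
// byte 0. Bit index 9 (I=10) lands on the second-highest bit of byte 1.
#include <cstdio>

static void set_bit(unsigned char* ptr, int bitnum) {
  ptr[bitnum / 8] = static_cast<unsigned char>(1u << (7 - bitnum % 8));
}

int main() {
  unsigned char block[16] = {0};
  set_bit(block, 0);                  // I=1  -> 80 00 ... 00
  std::printf("%02X\n", block[0]);    // prints 80

  unsigned char block2[16] = {0};
  set_bit(block2, 9);                 // I=10 -> byte 1 becomes 0x40
  std::printf("%02X\n", block2[1]);   // prints 40
  return 0;
}

So I=1 flips byte 0 to 0x80 and I=10 flips byte 1 to 0x40, matching the iteration order used by vk_test_1 and vt_test_1 above.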
null
null
null
null
74,577
34,003
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
34,003
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "third_party/blink/renderer/core/layout/ng/ng_physical_box_fragment.h" #include "third_party/blink/renderer/core/editing/position_with_affinity.h" #include "third_party/blink/renderer/core/layout/layout_block.h" #include "third_party/blink/renderer/core/layout/layout_box.h" #include "third_party/blink/renderer/core/layout/layout_object.h" namespace blink { NGPhysicalBoxFragment::NGPhysicalBoxFragment( LayoutObject* layout_object, const ComputedStyle& style, NGPhysicalSize size, Vector<scoped_refptr<NGPhysicalFragment>>& children, const NGPixelSnappedPhysicalBoxStrut& padding, const NGPhysicalOffsetRect& contents_visual_rect, Vector<NGBaseline>& baselines, NGBoxType box_type, bool is_old_layout_root, unsigned border_edges, // NGBorderEdges::Physical scoped_refptr<NGBreakToken> break_token) : NGPhysicalContainerFragment(layout_object, style, size, kFragmentBox, box_type, children, contents_visual_rect, std::move(break_token)), baselines_(std::move(baselines)), padding_(padding) { DCHECK(baselines.IsEmpty()); // Ensure move semantics is used. is_old_layout_root_ = is_old_layout_root; border_edge_ = border_edges; } const NGBaseline* NGPhysicalBoxFragment::Baseline( const NGBaselineRequest& request) const { for (const auto& baseline : baselines_) { if (baseline.request == request) return &baseline; } return nullptr; } bool NGPhysicalBoxFragment::HasSelfPaintingLayer() const { const LayoutObject* layout_object = GetLayoutObject(); DCHECK(layout_object); DCHECK(layout_object->IsBoxModelObject()); return ToLayoutBoxModelObject(layout_object)->HasSelfPaintingLayer(); } bool NGPhysicalBoxFragment::ChildrenInline() const { const LayoutObject* layout_object = GetLayoutObject(); DCHECK(layout_object); return layout_object->ChildrenInline(); } bool NGPhysicalBoxFragment::HasOverflowClip() const { const LayoutObject* layout_object = GetLayoutObject(); DCHECK(layout_object); return layout_object->HasOverflowClip(); } bool NGPhysicalBoxFragment::ShouldClipOverflow() const { const LayoutObject* layout_object = GetLayoutObject(); DCHECK(layout_object); return layout_object->IsBox() && ToLayoutBox(layout_object)->ShouldClipOverflow(); } NGPhysicalOffsetRect NGPhysicalBoxFragment::SelfVisualRect() const { const ComputedStyle& style = Style(); if (!style.HasVisualOverflowingEffect()) return {{}, Size()}; LayoutObject* layout_object = GetLayoutObject(); DCHECK(layout_object); if (layout_object->IsBox()) { LayoutRect visual_rect({}, Size().ToLayoutSize()); visual_rect.Expand(style.BoxDecorationOutsets()); if (style.HasOutline()) { Vector<LayoutRect> outline_rects; // The result rects are in coordinates of this object's border box. AddSelfOutlineRects(&outline_rects, LayoutPoint()); LayoutRect rect = UnionRectEvenIfEmpty(outline_rects); rect.Inflate(style.OutlineOutsetExtent()); visual_rect.Unite(rect); } return NGPhysicalOffsetRect(visual_rect); } // TODO(kojii): Implement for inline boxes. DCHECK(layout_object->IsLayoutInline()); LayoutRect visual_rect({}, Size().ToLayoutSize()); visual_rect.Expand(style.BoxDecorationOutsets()); return NGPhysicalOffsetRect(visual_rect); } void NGPhysicalBoxFragment::AddSelfOutlineRects( Vector<LayoutRect>* outline_rects, const LayoutPoint& additional_offset) const { DCHECK(outline_rects); // TODO(kojii): Implement. This is quite incomplete yet. 
// bool include_block_visual_overflow = // layout_object->OutlineRectsShouldIncludeBlockVisualOverflow(); // LayoutRect outline_rect(additional_offset, Size().ToLayoutSize()); // LayoutRect outline_rect = VisualRect(); // outline_rect.MoveBy(additional_offset); // outline_rect.Inflate(-Style().OutlineOffset()); // outline_rect.Inflate(-Style().OutlineWidth()); outline_rects->push_back(outline_rect); } NGPhysicalOffsetRect NGPhysicalBoxFragment::VisualRectWithContents() const { if (HasOverflowClip() || Style().HasMask()) return SelfVisualRect(); NGPhysicalOffsetRect visual_rect = SelfVisualRect(); visual_rect.Unite(ContentsVisualRect()); return visual_rect; } PositionWithAffinity NGPhysicalBoxFragment::PositionForPoint( const NGPhysicalOffset& point) const { if (!IsBlockFlow()) return PositionForPointInInlineLevelBox(point); return PositionForPointInInlineFormattingContext(point); } scoped_refptr<NGPhysicalFragment> NGPhysicalBoxFragment::CloneWithoutOffset() const { Vector<scoped_refptr<NGPhysicalFragment>> children_copy(children_); Vector<NGBaseline> baselines_copy(baselines_); scoped_refptr<NGPhysicalFragment> physical_fragment = base::AdoptRef(new NGPhysicalBoxFragment( layout_object_, Style(), size_, children_copy, padding_, contents_visual_rect_, baselines_copy, BoxType(), is_old_layout_root_, border_edge_, break_token_)); return physical_fragment; } } // namespace blink
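CloneWithoutOffset above builds explicit children_copy and baselines_copy vectors because the constructor takes those vectors by non-const reference and moves out of them (the DCHECK(baselines.IsEmpty()) is there to confirm the move happened). A rough standalone sketch of that sink-parameter convention with made-up types; the emptiness check mirrors the DCHECK and relies on the usual moved-from-vector behavior:

#include <cassert>
#include <string>
#include <vector>

class Fragment {
 public:
  explicit Fragment(std::vector<std::string>& children)
      : children_(std::move(children)) {
    // Mirrors the DCHECK above: the caller's vector was consumed by the move.
    assert(children.empty());
  }
  std::size_t size() const { return children_.size(); }

 private:
  std::vector<std::string> children_;
};

int main() {
  std::vector<std::string> children = {"a", "b"};
  std::vector<std::string> copy(children);  // keep the original usable
  Fragment clone(copy);                     // 'copy' is consumed, 'children' is not
  assert(clone.size() == 2 && children.size() == 2);
  return 0;
}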
null
null
null
null
30,866
25,592
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
190,587
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (C) 2016 BayLibre, SAS * Author: Neil Armstrong <narmstrong@baylibre.com> * Copyright (C) 2014 Endless Mobile * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. */ /* Canvas LUT Memory */ #ifndef __MESON_CANVAS_H #define __MESON_CANVAS_H #define MESON_CANVAS_ID_OSD1 0x4e /* Canvas configuration. */ #define MESON_CANVAS_WRAP_NONE 0x00 #define MESON_CANVAS_WRAP_X 0x01 #define MESON_CANVAS_WRAP_Y 0x02 #define MESON_CANVAS_BLKMODE_LINEAR 0x00 #define MESON_CANVAS_BLKMODE_32x32 0x01 #define MESON_CANVAS_BLKMODE_64x64 0x02 void meson_canvas_setup(struct meson_drm *priv, uint32_t canvas_index, uint32_t addr, uint32_t stride, uint32_t height, unsigned int wrap, unsigned int blkmode); #endif /* __MESON_CANVAS_H */
null
null
null
null
98,934
56,442
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
56,442
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/chromeos/login/users/mock_user_manager.h" #include <utility> #include "base/task_runner.h" #include "chrome/browser/chromeos/login/users/fake_supervised_user_manager.h" #include "chrome/browser/chromeos/profiles/profile_helper.h" namespace { class FakeTaskRunner : public base::TaskRunner { public: bool PostDelayedTask(const base::Location& from_here, base::OnceClosure task, base::TimeDelta delay) override { std::move(task).Run(); return true; } bool RunsTasksInCurrentSequence() const override { return true; } protected: ~FakeTaskRunner() override {} }; } // namespace namespace chromeos { MockUserManager::MockUserManager() : ChromeUserManager(new FakeTaskRunner()), user_flow_(new DefaultUserFlow()), supervised_user_manager_(new FakeSupervisedUserManager()) { ProfileHelper::SetProfileToUserForTestingEnabled(true); } MockUserManager::~MockUserManager() { ProfileHelper::SetProfileToUserForTestingEnabled(false); ClearUserList(); } const user_manager::UserList& MockUserManager::GetUsers() const { return user_list_; } user_manager::UserList MockUserManager::GetUnlockUsers() const { return user_list_; } const AccountId& MockUserManager::GetOwnerAccountId() const { return GetActiveUser()->GetAccountId(); } const user_manager::User* MockUserManager::GetActiveUser() const { return user_list_.empty() ? nullptr : user_list_.front(); } user_manager::User* MockUserManager::GetActiveUser() { return user_list_.empty() ? nullptr : user_list_.front(); } const user_manager::User* MockUserManager::GetPrimaryUser() const { return GetActiveUser(); } MultiProfileUserController* MockUserManager::GetMultiProfileUserController() { return nullptr; } UserImageManager* MockUserManager::GetUserImageManager( const AccountId& account_id) { return nullptr; } SupervisedUserManager* MockUserManager::GetSupervisedUserManager() { return supervised_user_manager_.get(); } void MockUserManager::ScheduleResolveLocale( const std::string& locale, base::OnceClosure on_resolved_callback, std::string* out_resolved_locale) const { DoScheduleResolveLocale(locale, &on_resolved_callback, out_resolved_locale); } // Creates a new User instance. 
void MockUserManager::SetActiveUser(const AccountId& account_id) { ClearUserList(); AddUser(account_id); } UserFlow* MockUserManager::GetCurrentUserFlow() const { return user_flow_.get(); } UserFlow* MockUserManager::GetUserFlow(const AccountId&) const { return user_flow_.get(); } user_manager::User* MockUserManager::CreatePublicAccountUser( const AccountId& account_id) { ClearUserList(); user_manager::User* user = user_manager::User::CreatePublicAccountUser(account_id); user_list_.push_back(user); ProfileHelper::Get()->SetProfileToUserMappingForTesting(user); return user_list_.back(); } user_manager::User* MockUserManager::CreateKioskAppUser( const AccountId& account_id) { ClearUserList(); user_list_.push_back(user_manager::User::CreateKioskAppUser(account_id)); ProfileHelper::Get()->SetProfileToUserMappingForTesting(user_list_.back()); return user_list_.back(); } void MockUserManager::AddUser(const AccountId& account_id) { AddUserWithAffiliation(account_id, false); } void MockUserManager::AddUserWithAffiliation(const AccountId& account_id, bool is_affiliated) { user_manager::User* user = user_manager::User::CreateRegularUser( account_id, user_manager::USER_TYPE_REGULAR); user->SetAffiliation(is_affiliated); user_list_.push_back(user); ProfileHelper::Get()->SetProfileToUserMappingForTesting(user); } void MockUserManager::ClearUserList() { // Can't use STLDeleteElements because of the protected destructor of User. for (user_manager::UserList::iterator user = user_list_.begin(); user != user_list_.end(); ++user) delete *user; user_list_.clear(); } bool MockUserManager::ShouldReportUser(const std::string& user_id) const { for (auto* user : user_list_) { if (user->GetAccountId().GetUserEmail() == user_id) return user->IsAffiliated(); } NOTREACHED(); return false; } } // namespace chromeos
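The FakeTaskRunner above runs every posted task inline, which is what keeps MockUserManager deterministic in unit tests. A dependency-free sketch of the same idea, with hypothetical Executor/InlineExecutor names rather than base::TaskRunner:

// An executor interface plus a test double that executes tasks synchronously,
// so code written against an asynchronous interface becomes deterministic.
#include <functional>
#include <iostream>

class Executor {
 public:
  virtual ~Executor() = default;
  virtual void Post(std::function<void()> task) = 0;
};

// Production code would queue the task; the test double runs it immediately.
class InlineExecutor : public Executor {
 public:
  void Post(std::function<void()> task) override { task(); }
};

int main() {
  InlineExecutor executor;
  bool ran = false;
  executor.Post([&] { ran = true; });
  std::cout << (ran ? "task ran synchronously" : "still pending") << "\n";
  return 0;
}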
null
null
null
null
53,305
36,354
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
36,354
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/* * Copyright (C) 2014 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "third_party/blink/renderer/core/xml/parser/shared_buffer_reader.h" #include <algorithm> #include <cstdlib> #include "testing/gtest/include/gtest/gtest.h" #include "third_party/blink/renderer/platform/shared_buffer.h" namespace blink { TEST(SharedBufferReaderTest, readDataWithNullSharedBuffer) { SharedBufferReader reader(nullptr); char buffer[32]; EXPECT_EQ(0, reader.ReadData(buffer, sizeof(buffer))); } TEST(SharedBufferReaderTest, readDataWith0BytesRequest) { scoped_refptr<SharedBuffer> shared_buffer = SharedBuffer::Create(); SharedBufferReader reader(shared_buffer); EXPECT_EQ(0, reader.ReadData(nullptr, 0)); } TEST(SharedBufferReaderTest, readDataWithSizeBiggerThanSharedBufferSize) { static const char kTestData[] = "hello"; scoped_refptr<SharedBuffer> shared_buffer = SharedBuffer::Create(kTestData, sizeof(kTestData)); SharedBufferReader reader(shared_buffer); const int kExtraBytes = 3; char output_buffer[sizeof(kTestData) + kExtraBytes]; const char kInitializationByte = 'a'; memset(output_buffer, kInitializationByte, sizeof(output_buffer)); EXPECT_EQ(sizeof(kTestData), static_cast<size_t>(reader.ReadData( output_buffer, sizeof(output_buffer)))); EXPECT_TRUE( std::equal(kTestData, kTestData + sizeof(kTestData), output_buffer)); // Check that the bytes past index sizeof(testData) were not touched. 
EXPECT_EQ(kExtraBytes, std::count(output_buffer, output_buffer + sizeof(output_buffer), kInitializationByte)); } TEST(SharedBufferReaderTest, readDataInMultiples) { const int kIterationsCount = 8; const int kBytesPerIteration = 64; Vector<char> test_data(kIterationsCount * kBytesPerIteration); std::generate(test_data.begin(), test_data.end(), &std::rand); scoped_refptr<SharedBuffer> shared_buffer = SharedBuffer::Create(&test_data[0], test_data.size()); SharedBufferReader reader(shared_buffer); Vector<char> destination_vector(test_data.size()); for (int i = 0; i < kIterationsCount; ++i) { const int offset = i * kBytesPerIteration; const int bytes_read = reader.ReadData(&destination_vector[0] + offset, kBytesPerIteration); EXPECT_EQ(kBytesPerIteration, bytes_read); } EXPECT_TRUE(std::equal(test_data.begin(), test_data.end(), destination_vector.begin())); } TEST(SharedBufferReaderTest, clearSharedBufferBetweenCallsToReadData) { Vector<char> test_data(128); std::generate(test_data.begin(), test_data.end(), &std::rand); scoped_refptr<SharedBuffer> shared_buffer = SharedBuffer::Create(&test_data[0], test_data.size()); SharedBufferReader reader(shared_buffer); Vector<char> destination_vector(test_data.size()); const int bytes_to_read = test_data.size() / 2; EXPECT_EQ(bytes_to_read, reader.ReadData(&destination_vector[0], bytes_to_read)); shared_buffer->Clear(); EXPECT_EQ(0, reader.ReadData(&destination_vector[0], bytes_to_read)); } } // namespace blink
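readDataWithSizeBiggerThanSharedBufferSize above pre-fills the destination with a sentinel byte and then counts surviving sentinels to prove the reader did not write past the data it returned. A plain C++ reduction of that technique, with a hypothetical ReadData helper standing in for SharedBufferReader:

#include <algorithm>
#include <cassert>
#include <cstring>

// Hypothetical reader: copies at most `available` bytes into `dst`.
static int ReadData(const char* src, int available, char* dst, int requested) {
  int n = std::min(available, requested);
  std::memcpy(dst, src, n);
  return n;
}

int main() {
  const char data[] = "hello";           // 6 bytes including the terminator
  char out[sizeof(data) + 3];
  std::memset(out, 'a', sizeof(out));    // sentinel fill

  int read = ReadData(data, sizeof(data), out, sizeof(out));
  assert(read == static_cast<int>(sizeof(data)));
  assert(std::equal(data, data + sizeof(data), out));
  // The 3 extra bytes must still hold the sentinel, proving no overwrite.
  assert(std::count(out, out + sizeof(out), 'a') == 3);
  return 0;
}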
null
null
null
null
33,217
19,054
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
19,054
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/sync_sessions/synced_session_tracker.h" #include "base/rand_util.h" #include "base/strings/stringprintf.h" #include "base/strings/utf_string_conversions.h" #include "components/sessions/core/serialized_navigation_entry_test_helper.h" #include "components/sync_sessions/mock_sync_sessions_client.h" #include "components/sync_sessions/synced_tab_delegate.h" #include "testing/gtest/include/gtest/gtest.h" using testing::AssertionFailure; using testing::AssertionResult; using testing::AssertionSuccess; using testing::ElementsAre; using testing::IsEmpty; using testing::IsNull; using testing::NotNull; namespace sync_sessions { namespace { const char kValidUrl[] = "http://www.example.com"; const char kSessionName[] = "sessionname"; const sync_pb::SyncEnums::DeviceType kDeviceType = sync_pb::SyncEnums_DeviceType_TYPE_PHONE; const char kTag[] = "tag"; const char kTag2[] = "tag2"; const char kTag3[] = "tag3"; const char kTitle[] = "title"; const int kTabNode1 = 1; const int kTabNode2 = 2; const int kTabNode3 = 3; const SessionID kWindow1 = SessionID::FromSerializedValue(1); const SessionID kWindow2 = SessionID::FromSerializedValue(2); const SessionID kWindow3 = SessionID::FromSerializedValue(3); const SessionID kTab1 = SessionID::FromSerializedValue(15); const SessionID kTab2 = SessionID::FromSerializedValue(25); const SessionID kTab3 = SessionID::FromSerializedValue(35); const SessionID kTab4 = SessionID::FromSerializedValue(45); const SessionID kTab5 = SessionID::FromSerializedValue(55); const SessionID kTab6 = SessionID::FromSerializedValue(65); const SessionID kTab7 = SessionID::FromSerializedValue(75); MATCHER_P(HasSessionTag, expected_tag, "") { return arg->session_tag == expected_tag; } } // namespace class SyncedSessionTrackerTest : public testing::Test { public: SyncedSessionTrackerTest() : tracker_(&sessions_client_) {} ~SyncedSessionTrackerTest() override {} SyncedSessionTracker* GetTracker() { return &tracker_; } TabNodePool* GetTabNodePool() { return &tracker_.local_tab_pool_; } // Verify that each tab within a session is allocated one SessionTab object, // and that that tab object is owned either by the Session itself or the // |unmapped_tabs_| tab holder. AssertionResult VerifyTabIntegrity(const std::string& session_tag) { const SyncedSessionTracker::TrackedSession* session = tracker_.LookupTrackedSession(session_tag); if (!session) { return AssertionFailure() << "Not tracked session with tag " << session_tag; } // First get all the tabs associated with this session. int total_tab_count = session->synced_tab_map.size(); // Now traverse the SyncedSession tree to verify the mapped tabs all match // up. int mapped_tab_count = 0; for (auto& window_pair : session->synced_session.windows) { mapped_tab_count += window_pair.second->wrapped_window.tabs.size(); for (auto& tab : window_pair.second->wrapped_window.tabs) { const auto tab_map_it = session->synced_tab_map.find(tab->tab_id); if (tab_map_it == session->synced_tab_map.end()) { return AssertionFailure() << "Tab ID " << tab->tab_id.id() << " has no corresponding synced tab entry"; } if (tab_map_it->second != tab.get()) { return AssertionFailure() << "Mapped tab " << tab->tab_id.id() << " does not match synced tab map " << tab->tab_id.id(); } } } // Wrap up by verifying all unmapped tabs are tracked. 
int unmapped_tab_count = session->unmapped_tabs.size(); for (const auto& tab_pair : session->unmapped_tabs) { if (tab_pair.first != tab_pair.second->tab_id) { return AssertionFailure() << "Unmapped tab " << tab_pair.second->tab_id.id() << " associated with wrong tab " << tab_pair.first; } const auto tab_map_it = session->synced_tab_map.find(tab_pair.second->tab_id); if (tab_map_it == session->synced_tab_map.end()) { return AssertionFailure() << "Unmapped tab " << tab_pair.second->tab_id << " has no corresponding synced tab entry"; } if (tab_map_it->second != tab_pair.second.get()) { return AssertionFailure() << "Unmapped tab " << tab_pair.second->tab_id.id() << " does not match synced tab map " << tab_map_it->second; } } return mapped_tab_count + unmapped_tab_count == total_tab_count ? AssertionSuccess() : AssertionFailure() << " Tab count mismatch. Total: " << total_tab_count << ". Mapped + Unmapped: " << mapped_tab_count << " + " << unmapped_tab_count; } MockSyncSessionsClient* GetSyncSessionsClient() { return &sessions_client_; } private: testing::NiceMock<MockSyncSessionsClient> sessions_client_; SyncedSessionTracker tracker_; }; TEST_F(SyncedSessionTrackerTest, GetSession) { SyncedSession* session1 = GetTracker()->GetSession(kTag); SyncedSession* session2 = GetTracker()->GetSession(kTag2); ASSERT_EQ(session1, GetTracker()->GetSession(kTag)); ASSERT_NE(session1, session2); // Should clean up memory on its own. } TEST_F(SyncedSessionTrackerTest, GetTabUnmapped) { sessions::SessionTab* tab = GetTracker()->GetTab(kTag, kTab1); ASSERT_EQ(tab, GetTracker()->GetTab(kTag, kTab1)); // Should clean up memory on its own. } TEST_F(SyncedSessionTrackerTest, PutWindowInSession) { GetTracker()->PutWindowInSession(kTag, kWindow1); SyncedSession* session = GetTracker()->GetSession(kTag); ASSERT_EQ(1U, session->windows.size()); // Doing it again should have no effect. GetTracker()->PutWindowInSession(kTag, kWindow1); ASSERT_EQ(1U, session->windows.size()); // Should clean up memory on its own. } TEST_F(SyncedSessionTrackerTest, PutTabInWindow) { GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab1); SyncedSession* session = GetTracker()->GetSession(kTag); ASSERT_EQ(1U, session->windows.size()); ASSERT_EQ(1U, session->windows[kWindow1]->wrapped_window.tabs.size()); ASSERT_EQ(GetTracker()->GetTab(kTag, kTab1), session->windows[kWindow1]->wrapped_window.tabs[0].get()); ASSERT_TRUE(VerifyTabIntegrity(kTag)); // Should clean up memory on its own. 
} TEST_F(SyncedSessionTrackerTest, LookupAllForeignSessions) { const char kInvalidUrl[] = "invalid.url"; ON_CALL(*GetSyncSessionsClient(), ShouldSyncURL(GURL(kInvalidUrl))) .WillByDefault(testing::Return(false)); EXPECT_THAT( GetTracker()->LookupAllForeignSessions(SyncedSessionTracker::PRESENTABLE), IsEmpty()); GetTracker()->GetSession(kTag); GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab1); sessions::SessionTab* tab = GetTracker()->GetTab(kTag, kTab1); ASSERT_TRUE(tab); tab->navigations.push_back( sessions::SerializedNavigationEntryTestHelper::CreateNavigation(kValidUrl, kTitle)); GetTracker()->GetSession(kTag2); GetTracker()->GetSession(kTag3); GetTracker()->PutWindowInSession(kTag3, kWindow1); GetTracker()->PutTabInWindow(kTag3, kWindow1, kTab1); tab = GetTracker()->GetTab(kTag3, kTab1); ASSERT_TRUE(tab); tab->navigations.push_back( sessions::SerializedNavigationEntryTestHelper::CreateNavigation( kInvalidUrl, kTitle)); // Only the session with a valid window and tab gets returned. EXPECT_THAT( GetTracker()->LookupAllForeignSessions(SyncedSessionTracker::PRESENTABLE), ElementsAre(HasSessionTag(kTag))); EXPECT_THAT(GetTracker()->LookupAllForeignSessions(SyncedSessionTracker::RAW), ElementsAre(HasSessionTag(kTag), HasSessionTag(kTag2), HasSessionTag(kTag3))); } TEST_F(SyncedSessionTrackerTest, LookupSessionWindows) { std::vector<const sessions::SessionWindow*> windows; ASSERT_FALSE(GetTracker()->LookupSessionWindows(kTag, &windows)); GetTracker()->GetSession(kTag); GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutWindowInSession(kTag, kWindow2); GetTracker()->GetSession(kTag2); GetTracker()->PutWindowInSession(kTag2, kWindow1); GetTracker()->PutWindowInSession(kTag2, kWindow2); ASSERT_TRUE(GetTracker()->LookupSessionWindows(kTag, &windows)); ASSERT_EQ(2U, windows.size()); // Only windows from kTag session. ASSERT_NE((sessions::SessionWindow*)nullptr, windows[0]); ASSERT_NE((sessions::SessionWindow*)nullptr, windows[1]); ASSERT_NE(windows[1], windows[0]); } TEST_F(SyncedSessionTrackerTest, LookupSessionTab) { ASSERT_THAT(GetTracker()->LookupSessionTab(kTag, SessionID::InvalidValue()), IsNull()); ASSERT_THAT(GetTracker()->LookupSessionTab(kTag, kTab1), IsNull()); GetTracker()->GetSession(kTag); GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab1); ASSERT_THAT(GetTracker()->LookupSessionTab(kTag, kTab1), NotNull()); } TEST_F(SyncedSessionTrackerTest, Complex) { std::vector<sessions::SessionTab *> tabs1, tabs2; sessions::SessionTab* temp_tab; ASSERT_TRUE(GetTracker()->Empty()); ASSERT_EQ(0U, GetTracker()->num_synced_sessions()); ASSERT_EQ(0U, GetTracker()->num_synced_tabs(kTag)); tabs1.push_back(GetTracker()->GetTab(kTag, kTab1)); tabs1.push_back(GetTracker()->GetTab(kTag, kTab2)); tabs1.push_back(GetTracker()->GetTab(kTag, kTab3)); ASSERT_EQ(3U, GetTracker()->num_synced_tabs(kTag)); ASSERT_EQ(1U, GetTracker()->num_synced_sessions()); temp_tab = GetTracker()->GetTab(kTag, kTab1); // Already created. 
ASSERT_EQ(3U, GetTracker()->num_synced_tabs(kTag)); ASSERT_EQ(1U, GetTracker()->num_synced_sessions()); ASSERT_EQ(tabs1[0], temp_tab); tabs2.push_back(GetTracker()->GetTab(kTag2, kTab1)); ASSERT_EQ(1U, GetTracker()->num_synced_tabs(kTag2)); ASSERT_EQ(2U, GetTracker()->num_synced_sessions()); ASSERT_FALSE(GetTracker()->DeleteForeignSession(kTag3)); SyncedSession* session = GetTracker()->GetSession(kTag); ASSERT_EQ(2U, GetTracker()->num_synced_sessions()); SyncedSession* session2 = GetTracker()->GetSession(kTag2); ASSERT_EQ(2U, GetTracker()->num_synced_sessions()); SyncedSession* session3 = GetTracker()->GetSession(kTag3); session3->device_type = sync_pb::SyncEnums_DeviceType_TYPE_LINUX; ASSERT_EQ(3U, GetTracker()->num_synced_sessions()); ASSERT_TRUE(session); ASSERT_TRUE(session2); ASSERT_TRUE(session3); ASSERT_NE(session, session2); ASSERT_NE(session2, session3); ASSERT_TRUE(GetTracker()->DeleteForeignSession(kTag3)); ASSERT_EQ(2U, GetTracker()->num_synced_sessions()); GetTracker()->PutWindowInSession(kTag, kWindow1); // Create a window. GetTracker()->PutTabInWindow(kTag, kWindow1, kTab3); // No longer unmapped. ASSERT_EQ(3U, GetTracker()->num_synced_tabs(kTag)); // Has not changed. ASSERT_EQ(tabs1[0], GetTracker()->LookupSessionTab(kTag, kTab1)); ASSERT_EQ(tabs1[2], GetTracker()->LookupSessionTab(kTag, kTab3)); ASSERT_THAT(GetTracker()->LookupSessionTab(kTag, kTab4), IsNull()); std::vector<const sessions::SessionWindow*> windows; ASSERT_TRUE(GetTracker()->LookupSessionWindows(kTag, &windows)); ASSERT_EQ(1U, windows.size()); ASSERT_TRUE(GetTracker()->LookupSessionWindows(kTag2, &windows)); ASSERT_EQ(0U, windows.size()); // The sessions don't have valid tabs, lookup should not succeed. std::vector<const SyncedSession*> sessions; EXPECT_THAT( GetTracker()->LookupAllForeignSessions(SyncedSessionTracker::PRESENTABLE), IsEmpty()); EXPECT_THAT(GetTracker()->LookupAllForeignSessions(SyncedSessionTracker::RAW), ElementsAre(HasSessionTag(kTag), HasSessionTag(kTag2))); GetTracker()->Clear(); ASSERT_EQ(0U, GetTracker()->num_synced_tabs(kTag)); ASSERT_EQ(0U, GetTracker()->num_synced_tabs(kTag2)); ASSERT_EQ(0U, GetTracker()->num_synced_sessions()); } TEST_F(SyncedSessionTrackerTest, ManyGetTabs) { ASSERT_TRUE(GetTracker()->Empty()); const int kMaxSessions = 10; const int kMaxTabs = 1000; const int kMaxAttempts = 10000; for (int j = 0; j < kMaxSessions; ++j) { std::string tag = base::StringPrintf("tag%d", j); for (int i = 0; i < kMaxAttempts; ++i) { // More attempts than tabs means we'll sometimes get the same tabs, // sometimes have to allocate new tabs. 
int rand_tab_num = base::RandInt(0, kMaxTabs); sessions::SessionTab* tab = GetTracker()->GetTab( tag, SessionID::FromSerializedValue(rand_tab_num + 1)); ASSERT_TRUE(tab); } } } TEST_F(SyncedSessionTrackerTest, LookupForeignTabNodeIds) { std::set<int> result; GetTracker()->OnTabNodeSeen(kTag, 1); GetTracker()->OnTabNodeSeen(kTag, 2); GetTracker()->LookupForeignTabNodeIds(kTag, &result); EXPECT_EQ(2U, result.size()); EXPECT_FALSE(result.end() == result.find(1)); EXPECT_FALSE(result.end() == result.find(2)); GetTracker()->LookupForeignTabNodeIds(kTag2, &result); EXPECT_TRUE(result.empty()); GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab1); GetTracker()->LookupForeignTabNodeIds(kTag, &result); EXPECT_EQ(2U, result.size()); GetTracker()->OnTabNodeSeen(kTag, 3); GetTracker()->LookupForeignTabNodeIds(kTag, &result); EXPECT_EQ(3U, result.size()); EXPECT_FALSE(result.end() == result.find(3)); GetTracker()->OnTabNodeSeen(kTag2, 21); GetTracker()->OnTabNodeSeen(kTag2, 22); GetTracker()->LookupForeignTabNodeIds(kTag2, &result); EXPECT_EQ(2U, result.size()); EXPECT_FALSE(result.end() == result.find(21)); EXPECT_FALSE(result.end() == result.find(22)); GetTracker()->LookupForeignTabNodeIds(kTag, &result); EXPECT_EQ(3U, result.size()); EXPECT_FALSE(result.end() == result.find(1)); EXPECT_FALSE(result.end() == result.find(2)); GetTracker()->LookupForeignTabNodeIds(kTag3, &result); EXPECT_TRUE(result.empty()); GetTracker()->PutWindowInSession(kTag3, kWindow2); GetTracker()->PutTabInWindow(kTag3, kWindow2, kTab2); GetTracker()->LookupForeignTabNodeIds(kTag3, &result); EXPECT_TRUE(result.empty()); EXPECT_FALSE(GetTracker()->DeleteForeignSession(kTag3)); GetTracker()->LookupForeignTabNodeIds(kTag3, &result); EXPECT_TRUE(result.empty()); EXPECT_FALSE(GetTracker()->DeleteForeignSession(kTag)); GetTracker()->LookupForeignTabNodeIds(kTag, &result); EXPECT_TRUE(result.empty()); GetTracker()->LookupForeignTabNodeIds(kTag2, &result); EXPECT_EQ(2U, result.size()); EXPECT_FALSE(result.end() == result.find(21)); EXPECT_FALSE(result.end() == result.find(22)); GetTracker()->OnTabNodeSeen(kTag2, 21); GetTracker()->OnTabNodeSeen(kTag2, 23); GetTracker()->LookupForeignTabNodeIds(kTag2, &result); EXPECT_EQ(3U, result.size()); EXPECT_FALSE(result.end() == result.find(21)); EXPECT_FALSE(result.end() == result.find(22)); EXPECT_FALSE(result.end() == result.find(23)); EXPECT_FALSE(GetTracker()->DeleteForeignSession(kTag2)); GetTracker()->LookupForeignTabNodeIds(kTag2, &result); EXPECT_TRUE(result.empty()); } TEST_F(SyncedSessionTrackerTest, SessionTracking) { ASSERT_TRUE(GetTracker()->Empty()); // Create some session information that is stale. SyncedSession* session1 = GetTracker()->GetSession(kTag); GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab2); GetTracker()->GetTab(kTag, kTab3)->window_id = SessionID::FromSerializedValue(1); // Unmapped. GetTracker()->GetTab(kTag, kTab4)->window_id = SessionID::FromSerializedValue(1); // Unmapped. GetTracker()->PutWindowInSession(kTag, kWindow2); GetTracker()->PutTabInWindow(kTag, kWindow2, kTab5); GetTracker()->PutTabInWindow(kTag, kWindow2, kTab6); ASSERT_EQ(2U, session1->windows.size()); ASSERT_EQ(2U, session1->windows[kWindow1]->wrapped_window.tabs.size()); ASSERT_EQ(2U, session1->windows[kWindow2]->wrapped_window.tabs.size()); ASSERT_EQ(6U, GetTracker()->num_synced_tabs(kTag)); // Create a session that should not be affected. 
SyncedSession* session2 = GetTracker()->GetSession(kTag2); GetTracker()->PutWindowInSession(kTag2, kWindow3); GetTracker()->PutTabInWindow(kTag2, kWindow3, kTab2); ASSERT_EQ(1U, session2->windows.size()); ASSERT_EQ(1U, session2->windows[kWindow3]->wrapped_window.tabs.size()); ASSERT_EQ(1U, GetTracker()->num_synced_tabs(kTag2)); // Reset tracking and get the current windows/tabs. // We simulate moving a tab from one window to another, then closing the // first window (including its one remaining tab), and opening a new tab // on the remaining window. // New tab, arrived before meta node so unmapped. GetTracker()->GetTab(kTag, kTab7); GetTracker()->ResetSessionTracking(kTag); GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab1); // Tab 1 is closed. GetTracker()->PutTabInWindow(kTag, kWindow1, kTab3); // No longer unmapped. // Tab 3 was unmapped and does not get used. GetTracker()->PutTabInWindow(kTag, kWindow1, kTab5); // Moved from window 1. // Window 1 was closed, along with tab 5. GetTracker()->PutTabInWindow(kTag, kWindow1, kTab7); // No longer unmapped. // Session 2 should not be affected. GetTracker()->CleanupSession(kTag); // Verify that only those parts of the session not owned have been removed. ASSERT_EQ(1U, session1->windows.size()); ASSERT_EQ(4U, session1->windows[kWindow1]->wrapped_window.tabs.size()); ASSERT_EQ(1U, session2->windows.size()); ASSERT_EQ(1U, session2->windows[kWindow3]->wrapped_window.tabs.size()); ASSERT_EQ(2U, GetTracker()->num_synced_sessions()); ASSERT_EQ(4U, GetTracker()->num_synced_tabs(kTag)); ASSERT_EQ(1U, GetTracker()->num_synced_tabs(kTag2)); ASSERT_TRUE(VerifyTabIntegrity(kTag)); // All memory should be properly deallocated by destructor for the // SyncedSessionTracker. } TEST_F(SyncedSessionTrackerTest, DeleteForeignTab) { int tab_node_id_1 = 1; int tab_node_id_2 = 2; std::set<int> result; GetTracker()->OnTabNodeSeen(kTag, tab_node_id_1); GetTracker()->OnTabNodeSeen(kTag, tab_node_id_2); GetTracker()->LookupForeignTabNodeIds(kTag, &result); EXPECT_EQ(2U, result.size()); EXPECT_TRUE(result.find(tab_node_id_1) != result.end()); EXPECT_TRUE(result.find(tab_node_id_2) != result.end()); GetTracker()->DeleteForeignTab(kTag, tab_node_id_1); GetTracker()->LookupForeignTabNodeIds(kTag, &result); EXPECT_EQ(1U, result.size()); EXPECT_TRUE(result.find(tab_node_id_2) != result.end()); GetTracker()->DeleteForeignTab(kTag, tab_node_id_2); GetTracker()->LookupForeignTabNodeIds(kTag, &result); EXPECT_TRUE(result.empty()); ASSERT_TRUE(VerifyTabIntegrity(kTag)); } TEST_F(SyncedSessionTrackerTest, CleanupLocalTabs) { std::set<int> free_node_ids; int tab_node_id = TabNodePool::kInvalidTabNodeID; GetTracker()->InitLocalSession(kTag, kSessionName, kDeviceType); // Start with two restored tab nodes. GetTracker()->ReassociateLocalTab(kTabNode1, kTab1); GetTracker()->ReassociateLocalTab(kTabNode2, kTab2); EXPECT_TRUE(GetTabNodePool()->Empty()); EXPECT_FALSE(GetTabNodePool()->Full()); EXPECT_EQ(2U, GetTabNodePool()->Capacity()); // Associate with no tabs. The tab pool should now be full. GetTracker()->ResetSessionTracking(kTag); GetTracker()->CleanupLocalTabs(&free_node_ids); EXPECT_TRUE(free_node_ids.empty()); EXPECT_TRUE(GetTabNodePool()->Full()); // Associate with only 1 tab open. A tab node should be reused. 
GetTracker()->ResetSessionTracking(kTag); GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab1); EXPECT_TRUE(GetTracker()->GetTabNodeFromLocalTabId(kTab1, &tab_node_id)); GetTracker()->CleanupLocalTabs(&free_node_ids); EXPECT_TRUE(free_node_ids.empty()); // TabNodePool should have one free tab node and one used. EXPECT_EQ(2U, GetTabNodePool()->Capacity()); EXPECT_FALSE(GetTabNodePool()->Empty()); EXPECT_FALSE(GetTabNodePool()->Full()); // Simulate a tab opening, which should use the last free tab node. EXPECT_TRUE(GetTracker()->GetTabNodeFromLocalTabId(kTab2, &tab_node_id)); EXPECT_TRUE(GetTabNodePool()->Empty()); // Simulate another tab opening, which should create a new associated tab // node. EXPECT_FALSE(GetTracker()->GetTabNodeFromLocalTabId(kTab3, &tab_node_id)); EXPECT_EQ(kTabNode3, tab_node_id); EXPECT_EQ(3U, GetTabNodePool()->Capacity()); EXPECT_TRUE(GetTabNodePool()->Empty()); // Fetching the same tab should return the same tab node id. EXPECT_TRUE(GetTracker()->GetTabNodeFromLocalTabId(kTab3, &tab_node_id)); EXPECT_EQ(kTabNode3, tab_node_id); EXPECT_TRUE(GetTabNodePool()->Empty()); // Associate with no tabs. All tabs should be freed again, and the pool // should now be full. GetTracker()->ResetSessionTracking(kTag); GetTracker()->CleanupLocalTabs(&free_node_ids); EXPECT_TRUE(free_node_ids.empty()); EXPECT_TRUE(GetTabNodePool()->Full()); EXPECT_FALSE(GetTabNodePool()->Empty()); ASSERT_TRUE(VerifyTabIntegrity(kTag)); } TEST_F(SyncedSessionTrackerTest, ReassociateTabMapped) { std::set<int> free_node_ids; // First create the tab normally. GetTracker()->InitLocalSession(kTag, kSessionName, kDeviceType); EXPECT_FALSE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); GetTracker()->ReassociateLocalTab(kTabNode1, kTab1); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); EXPECT_TRUE(GetTracker()->IsTabUnmappedForTesting(kTab1)); // Map it to a window with the same tab id as it was created with. GetTracker()->ResetSessionTracking(kTag); GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab1); GetTracker()->CleanupLocalTabs(&free_node_ids); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab1)); SyncedSession* session = GetTracker()->GetSession(kTag); ASSERT_EQ(1U, session->windows.size()); ASSERT_EQ(1U, session->windows[kWindow1]->wrapped_window.tabs.size()); ASSERT_EQ(GetTracker()->GetTab(kTag, kTab1), session->windows[kWindow1]->wrapped_window.tabs[0].get()); // Then reassociate with a new tab id. GetTracker()->ReassociateLocalTab(kTabNode1, kTab2); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab2)); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab1)); // Reset tracking, and put the new tab id into the window. GetTracker()->ResetSessionTracking(kTag); EXPECT_TRUE(GetTracker()->IsTabUnmappedForTesting(kTab2)); GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab2); GetTracker()->CleanupLocalTabs(&free_node_ids); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(free_node_ids.empty()); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab2)); // Now that it's been mapped, it should be accessible both via the // GetSession as well as the GetTab. 
ASSERT_EQ(GetTracker()->GetTab(kTag, kTab2), session->windows[kWindow1]->wrapped_window.tabs[0].get()); ASSERT_EQ(GetTracker()->GetTabNodeIdsForTesting(kTag).size(), GetTracker()->GetTabNodeIdsForTesting(kTag).count(kTabNode1)); ASSERT_EQ(1U, GetTabNodePool()->Capacity()); ASSERT_TRUE(VerifyTabIntegrity(kTag)); } TEST_F(SyncedSessionTrackerTest, ReassociateTabMappedTwice) { std::set<int> free_node_ids; // First create the tab normally. GetTracker()->InitLocalSession(kTag, kSessionName, kDeviceType); EXPECT_FALSE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); GetTracker()->ReassociateLocalTab(kTabNode1, kTab1); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); EXPECT_TRUE(GetTracker()->IsTabUnmappedForTesting(kTab1)); // Map it to a window with the same tab id as it was created with. GetTracker()->ResetSessionTracking(kTag); GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab1); GetTracker()->CleanupLocalTabs(&free_node_ids); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(free_node_ids.empty()); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab1)); SyncedSession* session = GetTracker()->GetSession(kTag); ASSERT_EQ(1U, session->windows.size()); ASSERT_EQ(1U, session->windows[kWindow1]->wrapped_window.tabs.size()); EXPECT_EQ(GetTracker()->GetTab(kTag, kTab1), session->windows[kWindow1]->wrapped_window.tabs[0].get()); // Then reassociate with a new tab id. GetTracker()->ReassociateLocalTab(kTabNode1, kTab2); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab2)); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab1)); // Tab 1 should no longer be associated with any SessionTab object. At this // point there's no need to verify it's unmapped state. EXPECT_THAT(GetTracker()->LookupSessionTab(kTag, kTab1), IsNull()); // Reset tracking and add back both the old tab and the new tab (both of which // refer to the same tab node id). GetTracker()->ResetSessionTracking(kTag); EXPECT_TRUE(GetTracker()->IsTabUnmappedForTesting(kTab2)); GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab2); GetTracker()->CleanupLocalTabs(&free_node_ids); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(free_node_ids.empty()); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab2)); // Now that it's been mapped, it should be accessible both via the // GetSession as well as the GetTab. EXPECT_EQ(GetTracker()->GetTab(kTag, kTab2), session->windows[kWindow1]->wrapped_window.tabs[1].get()); EXPECT_EQ(GetTracker()->GetTabNodeIdsForTesting(kTag).size(), GetTracker()->GetTabNodeIdsForTesting(kTag).count(kTabNode1)); EXPECT_EQ(1U, GetTabNodePool()->Capacity()); // Attempting to access the original tab will create a new SessionTab object. EXPECT_NE(GetTracker()->GetTab(kTag, kTab1), GetTracker()->GetTab(kTag, kTab2)); int tab_node_id = -1; EXPECT_FALSE(GetTracker()->GetTabNodeFromLocalTabId(kTab1, &tab_node_id)); ASSERT_TRUE(VerifyTabIntegrity(kTag)); } TEST_F(SyncedSessionTrackerTest, ReassociateTabUnmapped) { std::set<int> free_node_ids; // First create the old tab in an unmapped state. 
GetTracker()->InitLocalSession(kTag, kSessionName, kDeviceType); EXPECT_FALSE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); GetTracker()->ReassociateLocalTab(kTabNode1, kTab1); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); EXPECT_TRUE(GetTracker()->IsTabUnmappedForTesting(kTab1)); // Map it to a window, but reassociated with a new tab id. GetTracker()->ResetSessionTracking(kTag); GetTracker()->ReassociateLocalTab(kTabNode1, kTab2); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); EXPECT_TRUE(GetTracker()->IsTabUnmappedForTesting(kTab2)); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab1)); GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab2); GetTracker()->CleanupLocalTabs(&free_node_ids); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(free_node_ids.empty()); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab2)); // Now that it's been mapped, it should be accessible both via the // GetSession as well as GetTab. SyncedSession* session = GetTracker()->GetSession(kTag); ASSERT_EQ(GetTracker()->GetTab(kTag, kTab2), session->windows[kWindow1]->wrapped_window.tabs[0].get()); ASSERT_EQ(GetTracker()->GetTabNodeIdsForTesting(kTag).size(), GetTracker()->GetTabNodeIdsForTesting(kTag).count(kTabNode1)); ASSERT_EQ(1U, GetTabNodePool()->Capacity()); ASSERT_TRUE(VerifyTabIntegrity(kTag)); } TEST_F(SyncedSessionTrackerTest, ReassociateTabOldUnmappedNewMapped) { std::set<int> free_node_ids; // First create the old tab in an unmapped state. GetTracker()->InitLocalSession(kTag, kSessionName, kDeviceType); EXPECT_FALSE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); GetTracker()->ReassociateLocalTab(kTabNode1, kTab1); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); EXPECT_TRUE(GetTracker()->IsTabUnmappedForTesting(kTab1)); // Map an unseen tab to a window, then reassociate the existing tab to the // mapped tab id. GetTracker()->ResetSessionTracking(kTag); EXPECT_TRUE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab2); GetTracker()->CleanupLocalTabs(&free_node_ids); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab1)); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab2)); GetTracker()->ReassociateLocalTab(kTabNode1, kTab2); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(free_node_ids.empty()); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab1)); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab2)); // Now that it's been mapped, it should be accessible both via the // GetSession as well as GetTab. SyncedSession* session = GetTracker()->GetSession(kTag); ASSERT_EQ(GetTracker()->GetTab(kTag, kTab2), session->windows[kWindow1]->wrapped_window.tabs[0].get()); ASSERT_EQ(GetTracker()->GetTabNodeIdsForTesting(kTag).size(), GetTracker()->GetTabNodeIdsForTesting(kTag).count(kTabNode1)); ASSERT_EQ(1U, GetTabNodePool()->Capacity()); ASSERT_TRUE(VerifyTabIntegrity(kTag)); } TEST_F(SyncedSessionTrackerTest, ReassociateTabSameTabId) { std::set<int> free_node_ids; // First create the tab normally. 
GetTracker()->InitLocalSession(kTag, kSessionName, kDeviceType); EXPECT_FALSE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); GetTracker()->ReassociateLocalTab(kTabNode1, kTab1); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); EXPECT_TRUE(GetTracker()->IsTabUnmappedForTesting(kTab1)); // Map it to a window. GetTracker()->ResetSessionTracking(kTag); GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab1); GetTracker()->CleanupLocalTabs(&free_node_ids); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab1)); SyncedSession* session = GetTracker()->GetSession(kTag); ASSERT_EQ(1U, session->windows.size()); ASSERT_EQ(1U, session->windows[kWindow1]->wrapped_window.tabs.size()); ASSERT_EQ(GetTracker()->GetTab(kTag, kTab1), session->windows[kWindow1]->wrapped_window.tabs[0].get()); // Reassociate, using the same tab id. GetTracker()->ReassociateLocalTab(kTabNode1, kTab1); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab1)); // Reset tracking, and put the tab id back into the same window. GetTracker()->ResetSessionTracking(kTag); EXPECT_TRUE(GetTracker()->IsTabUnmappedForTesting(kTab1)); GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab1); GetTracker()->CleanupLocalTabs(&free_node_ids); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(free_node_ids.empty()); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab1)); // Now that it's been mapped, it should be accessible both via the // GetSession as well as the GetTab. ASSERT_EQ(GetTracker()->GetTab(kTag, kTab1), session->windows[kWindow1]->wrapped_window.tabs[0].get()); ASSERT_EQ(GetTracker()->GetTabNodeIdsForTesting(kTag).size(), GetTracker()->GetTabNodeIdsForTesting(kTag).count(kTabNode1)); ASSERT_EQ(1U, GetTabNodePool()->Capacity()); ASSERT_TRUE(VerifyTabIntegrity(kTag)); } TEST_F(SyncedSessionTrackerTest, ReassociateTabOldMappedNewUnmapped) { std::set<int> free_node_ids; // First create an unmapped tab. GetTracker()->InitLocalSession(kTag, kSessionName, kDeviceType); EXPECT_FALSE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); GetTracker()->ReassociateLocalTab(kTabNode1, kTab1); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); EXPECT_TRUE(GetTracker()->IsTabUnmappedForTesting(kTab1)); // Now, map the first one, deleting the second one. GetTracker()->ResetSessionTracking(kTag); GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab1); GetTracker()->CleanupLocalTabs(&free_node_ids); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab1)); SyncedSession* session = GetTracker()->GetSession(kTag); ASSERT_EQ(1U, session->windows.size()); ASSERT_EQ(1U, session->windows[kWindow1]->wrapped_window.tabs.size()); ASSERT_EQ(GetTracker()->GetTab(kTag, kTab1), session->windows[kWindow1]->wrapped_window.tabs[0].get()); // Create a second unmapped tab. GetTracker()->ReassociateLocalTab(kTabNode2, kTab2); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(GetTracker()->IsLocalTabNodeAssociated(kTabNode2)); EXPECT_TRUE(GetTracker()->IsTabUnmappedForTesting(kTab2)); // Reassociate the second tab with node of the first tab. 
GetTracker()->ReassociateLocalTab(kTabNode1, kTab2); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_TRUE(GetTracker()->IsLocalTabNodeAssociated(kTabNode1)); EXPECT_FALSE(GetTracker()->IsLocalTabNodeAssociated(kTabNode2)); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab1)); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab2)); // Now map the new one. GetTracker()->ResetSessionTracking(kTag); GetTracker()->PutWindowInSession(kTag, kWindow1); GetTracker()->PutTabInWindow(kTag, kWindow1, kTab2); GetTracker()->CleanupLocalTabs(&free_node_ids); ASSERT_TRUE(VerifyTabIntegrity(kTag)); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab1)); EXPECT_FALSE(GetTracker()->IsTabUnmappedForTesting(kTab2)); // Now that it's been mapped, it should be accessible both via the // GetSession as well as the GetTab. ASSERT_EQ(GetTracker()->GetTab(kTag, kTab2), session->windows[kWindow1]->wrapped_window.tabs[0].get()); ASSERT_EQ(2U, GetTabNodePool()->Capacity()); ASSERT_TRUE(VerifyTabIntegrity(kTag)); } } // namespace sync_sessions
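VerifyTabIntegrity in the fixture above returns a testing::AssertionResult so that ASSERT_TRUE(VerifyTabIntegrity(kTag)) prints which invariant broke instead of a bare boolean failure. A minimal sketch of that idiom against a made-up predicate:

#include <vector>
#include "gtest/gtest.h"

// Returns success, or a failure carrying a readable diagnostic message.
testing::AssertionResult AllPositive(const std::vector<int>& values) {
  for (std::size_t i = 0; i < values.size(); ++i) {
    if (values[i] <= 0) {
      return testing::AssertionFailure()
             << "value at index " << i << " is " << values[i];
    }
  }
  return testing::AssertionSuccess();
}

TEST(AssertionResultSketch, PrintsHelpfulMessageOnFailure) {
  EXPECT_TRUE(AllPositive({1, 2, 3}));
  // EXPECT_TRUE(AllPositive({1, -2, 3}));  // would report "value at index 1 is -2"
}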
null
null
null
null
15,917
168
null
train_val
c536b6be1a72aefd632d5530106a67c516cb9f4b
256,555
openssl
0
https://github.com/openssl/openssl
2016-09-22 23:12:38+01:00
/* * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ /* ==================================================================== * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED. * * Portions of the attached software ("Contribution") are developed by * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project. * * The Contribution is licensed pursuant to the OpenSSL open source * license provided above. * * The elliptic curve binary polynomial software is originally written by * Sheueling Chang Shantz and Douglas Stebila of Sun Microsystems Laboratories. * */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <openssl/opensslconf.h> /* To see if OPENSSL_NO_EC is defined */ #ifdef OPENSSL_NO_EC int main(int argc, char *argv[]) { puts("Elliptic curves are disabled."); return 0; } #else # include <openssl/crypto.h> # include <openssl/bio.h> # include <openssl/evp.h> # include <openssl/bn.h> # include <openssl/ec.h> # ifndef OPENSSL_NO_ENGINE # include <openssl/engine.h> # endif # include <openssl/err.h> # include <openssl/rand.h> static const char rnd_seed[] = "string to make the random number generator " "think it has entropy"; /* declaration of the test functions */ int x9_62_tests(BIO *); int x9_62_test_internal(BIO *out, int nid, const char *r, const char *s); int test_builtin(BIO *); /* functions to change the RAND_METHOD */ int change_rand(void); int restore_rand(void); int fbytes(unsigned char *buf, int num); static RAND_METHOD fake_rand; static const RAND_METHOD *old_rand; int change_rand(void) { /* save old rand method */ if ((old_rand = RAND_get_rand_method()) == NULL) return 0; fake_rand.seed = old_rand->seed; fake_rand.cleanup = old_rand->cleanup; fake_rand.add = old_rand->add; fake_rand.status = old_rand->status; /* use own random function */ fake_rand.bytes = fbytes; fake_rand.pseudorand = old_rand->bytes; /* set new RAND_METHOD */ if (!RAND_set_rand_method(&fake_rand)) return 0; return 1; } int restore_rand(void) { if (!RAND_set_rand_method(old_rand)) return 0; else return 1; } static int fbytes_counter = 0, use_fake = 0; static const char *numbers[8] = { "651056770906015076056810763456358567190100156695615665659", "6140507067065001063065065565667405560006161556565665656654", "8763001015071075675010661307616710783570106710677817767166" "71676178726717", "7000000175690566466555057817571571075705015757757057795755" "55657156756655", "1275552191113212300012030439187146164646146646466749494799", "1542725565216523985789236956265265265235675811949404040041", "1456427555219115346513212300075341203043918714616464614664" "64667494947990", "1712787255652165239672857892369562652652652356758119494040" "40041670216363" }; int fbytes(unsigned char *buf, int num) { int ret; BIGNUM *tmp = NULL; if (use_fake == 0) return old_rand->bytes(buf, num); use_fake = 0; if (fbytes_counter >= 8) return 0; tmp = BN_new(); if (!tmp) return 0; if (!BN_dec2bn(&tmp, numbers[fbytes_counter])) { BN_free(tmp); return 0; } fbytes_counter++; if (num != BN_num_bytes(tmp) || !BN_bn2bin(tmp, buf)) ret = 0; else ret = 1; BN_free(tmp); return ret; } /* some tests from the X9.62 draft */ int x9_62_test_internal(BIO *out, int nid, const char *r_in, const char *s_in) { int ret = 0; const char message[] = "abc"; unsigned 
char digest[20]; unsigned int dgst_len = 0; EVP_MD_CTX *md_ctx = EVP_MD_CTX_new(); EC_KEY *key = NULL; ECDSA_SIG *signature = NULL; BIGNUM *r = NULL, *s = NULL; BIGNUM *kinv = NULL, *rp = NULL; const BIGNUM *sig_r, *sig_s; if (md_ctx == NULL) goto x962_int_err; /* get the message digest */ if (!EVP_DigestInit(md_ctx, EVP_sha1()) || !EVP_DigestUpdate(md_ctx, (const void *)message, 3) || !EVP_DigestFinal(md_ctx, digest, &dgst_len)) goto x962_int_err; BIO_printf(out, "testing %s: ", OBJ_nid2sn(nid)); /* create the key */ if ((key = EC_KEY_new_by_curve_name(nid)) == NULL) goto x962_int_err; use_fake = 1; if (!EC_KEY_generate_key(key)) goto x962_int_err; BIO_printf(out, "."); (void)BIO_flush(out); /* create the signature */ use_fake = 1; /* Use ECDSA_sign_setup to avoid use of ECDSA nonces */ if (!ECDSA_sign_setup(key, NULL, &kinv, &rp)) goto x962_int_err; signature = ECDSA_do_sign_ex(digest, 20, kinv, rp, key); if (signature == NULL) goto x962_int_err; BIO_printf(out, "."); (void)BIO_flush(out); /* compare the created signature with the expected signature */ if ((r = BN_new()) == NULL || (s = BN_new()) == NULL) goto x962_int_err; if (!BN_dec2bn(&r, r_in) || !BN_dec2bn(&s, s_in)) goto x962_int_err; ECDSA_SIG_get0(signature, &sig_r, &sig_s); if (BN_cmp(sig_r, r) || BN_cmp(sig_s, s)) goto x962_int_err; BIO_printf(out, "."); (void)BIO_flush(out); /* verify the signature */ if (ECDSA_do_verify(digest, 20, signature, key) != 1) goto x962_int_err; BIO_printf(out, "."); (void)BIO_flush(out); BIO_printf(out, " ok\n"); ret = 1; x962_int_err: if (!ret) BIO_printf(out, " failed\n"); EC_KEY_free(key); ECDSA_SIG_free(signature); BN_free(r); BN_free(s); EVP_MD_CTX_free(md_ctx); BN_clear_free(kinv); BN_clear_free(rp); return ret; } int x9_62_tests(BIO *out) { int ret = 0; BIO_printf(out, "some tests from X9.62:\n"); /* set own rand method */ if (!change_rand()) goto x962_err; if (!x9_62_test_internal(out, NID_X9_62_prime192v1, "3342403536405981729393488334694600415596881826869351677613", "5735822328888155254683894997897571951568553642892029982342")) goto x962_err; if (!x9_62_test_internal(out, NID_X9_62_prime239v1, "3086361431751678114926225473006680188549593787585317781474" "62058306432176", "3238135532097973577080787768312505059318910517550078427819" "78505179448783")) goto x962_err; # ifndef OPENSSL_NO_EC2M if (!x9_62_test_internal(out, NID_X9_62_c2tnb191v1, "87194383164871543355722284926904419997237591535066528048", "308992691965804947361541664549085895292153777025772063598")) goto x962_err; if (!x9_62_test_internal(out, NID_X9_62_c2tnb239v1, "2159633321041961198501834003903461262881815148684178964245" "5876922391552", "1970303740007316867383349976549972270528498040721988191026" "49413465737174")) goto x962_err; # endif ret = 1; x962_err: if (!restore_rand()) ret = 0; return ret; } int test_builtin(BIO *out) { EC_builtin_curve *curves = NULL; size_t crv_len = 0, n = 0; EC_KEY *eckey = NULL, *wrong_eckey = NULL; EC_GROUP *group; ECDSA_SIG *ecdsa_sig = NULL, *modified_sig = NULL; unsigned char digest[20], wrong_digest[20]; unsigned char *signature = NULL; const unsigned char *sig_ptr; unsigned char *sig_ptr2; unsigned char *raw_buf = NULL; const BIGNUM *sig_r, *sig_s; BIGNUM *modified_r = NULL, *modified_s = NULL; BIGNUM *unmodified_r = NULL, *unmodified_s = NULL; unsigned int sig_len, degree, r_len, s_len, bn_len, buf_len; int nid, ret = 0; /* fill digest values with some random data */ if (RAND_bytes(digest, 20) <= 0 || RAND_bytes(wrong_digest, 20) <= 0) { BIO_printf(out, "ERROR: unable to get random 
data\n"); goto builtin_err; } /* * create and verify a ecdsa signature with every available curve (with ) */ BIO_printf(out, "\ntesting ECDSA_sign() and ECDSA_verify() " "with some internal curves:\n"); /* get a list of all internal curves */ crv_len = EC_get_builtin_curves(NULL, 0); curves = OPENSSL_malloc(sizeof(*curves) * crv_len); if (curves == NULL) { BIO_printf(out, "malloc error\n"); goto builtin_err; } if (!EC_get_builtin_curves(curves, crv_len)) { BIO_printf(out, "unable to get internal curves\n"); goto builtin_err; } /* now create and verify a signature for every curve */ for (n = 0; n < crv_len; n++) { unsigned char dirt, offset; nid = curves[n].nid; if (nid == NID_ipsec4 || nid == NID_X25519) continue; /* create new ecdsa key (== EC_KEY) */ if ((eckey = EC_KEY_new()) == NULL) goto builtin_err; group = EC_GROUP_new_by_curve_name(nid); if (group == NULL) goto builtin_err; if (EC_KEY_set_group(eckey, group) == 0) goto builtin_err; EC_GROUP_free(group); degree = EC_GROUP_get_degree(EC_KEY_get0_group(eckey)); if (degree < 160) { /* drop the curve */ EC_KEY_free(eckey); eckey = NULL; continue; } BIO_printf(out, "%s: ", OBJ_nid2sn(nid)); /* create key */ if (!EC_KEY_generate_key(eckey)) { BIO_printf(out, " failed\n"); goto builtin_err; } /* create second key */ if ((wrong_eckey = EC_KEY_new()) == NULL) goto builtin_err; group = EC_GROUP_new_by_curve_name(nid); if (group == NULL) goto builtin_err; if (EC_KEY_set_group(wrong_eckey, group) == 0) goto builtin_err; EC_GROUP_free(group); if (!EC_KEY_generate_key(wrong_eckey)) { BIO_printf(out, " failed\n"); goto builtin_err; } BIO_printf(out, "."); (void)BIO_flush(out); /* check key */ if (!EC_KEY_check_key(eckey)) { BIO_printf(out, " failed\n"); goto builtin_err; } BIO_printf(out, "."); (void)BIO_flush(out); /* create signature */ sig_len = ECDSA_size(eckey); if ((signature = OPENSSL_malloc(sig_len)) == NULL) goto builtin_err; if (!ECDSA_sign(0, digest, 20, signature, &sig_len, eckey)) { BIO_printf(out, " failed\n"); goto builtin_err; } BIO_printf(out, "."); (void)BIO_flush(out); /* verify signature */ if (ECDSA_verify(0, digest, 20, signature, sig_len, eckey) != 1) { BIO_printf(out, " failed\n"); goto builtin_err; } BIO_printf(out, "."); (void)BIO_flush(out); /* verify signature with the wrong key */ if (ECDSA_verify(0, digest, 20, signature, sig_len, wrong_eckey) == 1) { BIO_printf(out, " failed\n"); goto builtin_err; } BIO_printf(out, "."); (void)BIO_flush(out); /* wrong digest */ if (ECDSA_verify(0, wrong_digest, 20, signature, sig_len, eckey) == 1) { BIO_printf(out, " failed\n"); goto builtin_err; } BIO_printf(out, "."); (void)BIO_flush(out); /* wrong length */ if (ECDSA_verify(0, digest, 20, signature, sig_len - 1, eckey) == 1) { BIO_printf(out, " failed\n"); goto builtin_err; } BIO_printf(out, "."); (void)BIO_flush(out); /* * Modify a single byte of the signature: to ensure we don't garble * the ASN1 structure, we read the raw signature and modify a byte in * one of the bignums directly. */ sig_ptr = signature; if ((ecdsa_sig = d2i_ECDSA_SIG(NULL, &sig_ptr, sig_len)) == NULL) { BIO_printf(out, " failed\n"); goto builtin_err; } ECDSA_SIG_get0(ecdsa_sig, &sig_r, &sig_s); /* Store the two BIGNUMs in raw_buf. 
*/ r_len = BN_num_bytes(sig_r); s_len = BN_num_bytes(sig_s); bn_len = (degree + 7) / 8; if ((r_len > bn_len) || (s_len > bn_len)) { BIO_printf(out, " failed\n"); goto builtin_err; } buf_len = 2 * bn_len; if ((raw_buf = OPENSSL_zalloc(buf_len)) == NULL) goto builtin_err; BN_bn2bin(sig_r, raw_buf + bn_len - r_len); BN_bn2bin(sig_s, raw_buf + buf_len - s_len); /* Modify a single byte in the buffer. */ offset = raw_buf[10] % buf_len; dirt = raw_buf[11] ? raw_buf[11] : 1; raw_buf[offset] ^= dirt; /* Now read the BIGNUMs back in from raw_buf. */ modified_sig = ECDSA_SIG_new(); if (modified_sig == NULL) goto builtin_err; if (((modified_r = BN_bin2bn(raw_buf, bn_len, NULL)) == NULL) || ((modified_s = BN_bin2bn(raw_buf + bn_len, bn_len, NULL)) == NULL) || !ECDSA_SIG_set0(modified_sig, modified_r, modified_s)) { BN_free(modified_r); BN_free(modified_s); goto builtin_err; } sig_ptr2 = signature; sig_len = i2d_ECDSA_SIG(modified_sig, &sig_ptr2); if (ECDSA_verify(0, digest, 20, signature, sig_len, eckey) == 1) { BIO_printf(out, " failed\n"); goto builtin_err; } /* * Sanity check: undo the modification and verify signature. */ raw_buf[offset] ^= dirt; if (((unmodified_r = BN_bin2bn(raw_buf, bn_len, NULL)) == NULL) || ((unmodified_s = BN_bin2bn(raw_buf + bn_len, bn_len, NULL)) == NULL) || !ECDSA_SIG_set0(modified_sig, unmodified_r, unmodified_s)) { BN_free(unmodified_r); BN_free(unmodified_s); goto builtin_err; } sig_ptr2 = signature; sig_len = i2d_ECDSA_SIG(modified_sig, &sig_ptr2); if (ECDSA_verify(0, digest, 20, signature, sig_len, eckey) != 1) { BIO_printf(out, " failed\n"); goto builtin_err; } BIO_printf(out, "."); (void)BIO_flush(out); BIO_printf(out, " ok\n"); /* cleanup */ /* clean bogus errors */ ERR_clear_error(); OPENSSL_free(signature); signature = NULL; EC_KEY_free(eckey); eckey = NULL; EC_KEY_free(wrong_eckey); wrong_eckey = NULL; ECDSA_SIG_free(ecdsa_sig); ecdsa_sig = NULL; ECDSA_SIG_free(modified_sig); modified_sig = NULL; OPENSSL_free(raw_buf); raw_buf = NULL; } ret = 1; builtin_err: EC_KEY_free(eckey); EC_KEY_free(wrong_eckey); ECDSA_SIG_free(ecdsa_sig); ECDSA_SIG_free(modified_sig); OPENSSL_free(signature); OPENSSL_free(raw_buf); OPENSSL_free(curves); return ret; } int main(void) { int ret = 1; BIO *out; char *p; out = BIO_new_fp(stdout, BIO_NOCLOSE | BIO_FP_TEXT); p = getenv("OPENSSL_DEBUG_MEMORY"); if (p != NULL && strcmp(p, "on") == 0) CRYPTO_set_mem_debug(1); /* initialize the prng */ RAND_seed(rnd_seed, sizeof(rnd_seed)); /* the tests */ if (!x9_62_tests(out)) goto err; if (!test_builtin(out)) goto err; ret = 0; err: if (ret) BIO_printf(out, "\nECDSA test failed\n"); else BIO_printf(out, "\nECDSA test passed\n"); if (ret) ERR_print_errors(out); #ifndef OPENSSL_NO_CRYPTO_MDEBUG if (CRYPTO_mem_leaks(out) <= 0) ret = 1; #endif BIO_free(out); return ret; } #endif
null
null
null
null
118,000
39,953
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
204,948
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Stage 1 of the trace events. * * Override the macros in <trace/trace_events.h> to include the following: * * struct trace_event_raw_<call> { * struct trace_entry ent; * <type> <item>; * <type2> <item2>[<len>]; * [...] * }; * * The <type> <item> is created by the __field(type, item) macro or * the __array(type2, item2, len) macro. * We simply do "type item;", and that will create the fields * in the structure. */ #include <linux/trace_events.h> #ifndef TRACE_SYSTEM_VAR #define TRACE_SYSTEM_VAR TRACE_SYSTEM #endif #define __app__(x, y) str__##x##y #define __app(x, y) __app__(x, y) #define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name) #define TRACE_MAKE_SYSTEM_STR() \ static const char TRACE_SYSTEM_STRING[] = \ __stringify(TRACE_SYSTEM) TRACE_MAKE_SYSTEM_STR(); #undef TRACE_DEFINE_ENUM #define TRACE_DEFINE_ENUM(a) \ static struct trace_enum_map __used __initdata \ __##TRACE_SYSTEM##_##a = \ { \ .system = TRACE_SYSTEM_STRING, \ .enum_string = #a, \ .enum_value = a \ }; \ static struct trace_enum_map __used \ __attribute__((section("_ftrace_enum_map"))) \ *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a /* * DECLARE_EVENT_CLASS can be used to add a generic function * handlers for events. That is, if all events have the same * parameters and just have distinct trace points. * Each tracepoint can be defined with DEFINE_EVENT and that * will map the DECLARE_EVENT_CLASS to the tracepoint. * * TRACE_EVENT is a one to one mapping between tracepoint and template. */ #undef TRACE_EVENT #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ DECLARE_EVENT_CLASS(name, \ PARAMS(proto), \ PARAMS(args), \ PARAMS(tstruct), \ PARAMS(assign), \ PARAMS(print)); \ DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args)); #undef __field #define __field(type, item) type item; #undef __field_ext #define __field_ext(type, item, filter_type) type item; #undef __field_struct #define __field_struct(type, item) type item; #undef __field_struct_ext #define __field_struct_ext(type, item, filter_type) type item; #undef __array #define __array(type, item, len) type item[len]; #undef __dynamic_array #define __dynamic_array(type, item, len) u32 __data_loc_##item; #undef __string #define __string(item, src) __dynamic_array(char, item, -1) #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(char, item, -1) #undef TP_STRUCT__entry #define TP_STRUCT__entry(args...) args #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ struct trace_event_raw_##name { \ struct trace_entry ent; \ tstruct \ char __data[0]; \ }; \ \ static struct trace_event_class event_class_##name; #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) \ static struct trace_event_call __used \ __attribute__((__aligned__(4))) event_##name #undef DEFINE_EVENT_FN #define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg) \ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) /* Callbacks are meaningless to ftrace. 
*/ #undef TRACE_EVENT_FN #define TRACE_EVENT_FN(name, proto, args, tstruct, \ assign, print, reg, unreg) \ TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \ PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \ #undef TRACE_EVENT_FN_COND #define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct, \ assign, print, reg, unreg) \ TRACE_EVENT_CONDITION(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \ PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \ #undef TRACE_EVENT_FLAGS #define TRACE_EVENT_FLAGS(name, value) \ __TRACE_EVENT_FLAGS(name, value) #undef TRACE_EVENT_PERF_PERM #define TRACE_EVENT_PERF_PERM(name, expr...) \ __TRACE_EVENT_PERF_PERM(name, expr) #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* * Stage 2 of the trace events. * * Include the following: * * struct trace_event_data_offsets_<call> { * u32 <item1>; * u32 <item2>; * [...] * }; * * The __dynamic_array() macro will create each u32 <item>, this is * to keep the offset of each array from the beginning of the event. * The size of an array is also encoded, in the higher 16 bits of <item>. */ #undef TRACE_DEFINE_ENUM #define TRACE_DEFINE_ENUM(a) #undef __field #define __field(type, item) #undef __field_ext #define __field_ext(type, item, filter_type) #undef __field_struct #define __field_struct(type, item) #undef __field_struct_ext #define __field_struct_ext(type, item, filter_type) #undef __array #define __array(type, item, len) #undef __dynamic_array #define __dynamic_array(type, item, len) u32 item; #undef __string #define __string(item, src) __dynamic_array(char, item, -1) #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ struct trace_event_data_offsets_##call { \ tstruct; \ }; #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) #undef TRACE_EVENT_FLAGS #define TRACE_EVENT_FLAGS(event, flag) #undef TRACE_EVENT_PERF_PERM #define TRACE_EVENT_PERF_PERM(event, expr...) #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* * Stage 3 of the trace events. * * Override the macros in <trace/trace_events.h> to include the following: * * enum print_line_t * trace_raw_output_<call>(struct trace_iterator *iter, int flags) * { * struct trace_seq *s = &iter->seq; * struct trace_event_raw_<call> *field; <-- defined in stage 1 * struct trace_entry *entry; * struct trace_seq *p = &iter->tmp_seq; * int ret; * * entry = iter->ent; * * if (entry->type != event_<call>->event.type) { * WARN_ON_ONCE(1); * return TRACE_TYPE_UNHANDLED; * } * * field = (typeof(field))entry; * * trace_seq_init(p); * ret = trace_seq_printf(s, "%s: ", <call>); * if (ret) * ret = trace_seq_printf(s, <TP_printk> "\n"); * if (!ret) * return TRACE_TYPE_PARTIAL_LINE; * * return TRACE_TYPE_HANDLED; * } * * This is the method used to print the raw event to the trace * output format. Note, this is not needed if the data is read * in binary. */ #undef __entry #define __entry field #undef TP_printk #define TP_printk(fmt, args...) 
fmt "\n", args #undef __get_dynamic_array #define __get_dynamic_array(field) \ ((void *)__entry + (__entry->__data_loc_##field & 0xffff)) #undef __get_dynamic_array_len #define __get_dynamic_array_len(field) \ ((__entry->__data_loc_##field >> 16) & 0xffff) #undef __get_str #define __get_str(field) ((char *)__get_dynamic_array(field)) #undef __get_bitmask #define __get_bitmask(field) \ ({ \ void *__bitmask = __get_dynamic_array(field); \ unsigned int __bitmask_size; \ __bitmask_size = __get_dynamic_array_len(field); \ trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \ }) #undef __print_flags #define __print_flags(flag, delim, flag_array...) \ ({ \ static const struct trace_print_flags __flags[] = \ { flag_array, { -1, NULL }}; \ trace_print_flags_seq(p, delim, flag, __flags); \ }) #undef __print_symbolic #define __print_symbolic(value, symbol_array...) \ ({ \ static const struct trace_print_flags symbols[] = \ { symbol_array, { -1, NULL }}; \ trace_print_symbols_seq(p, value, symbols); \ }) #undef __print_flags_u64 #undef __print_symbolic_u64 #if BITS_PER_LONG == 32 #define __print_flags_u64(flag, delim, flag_array...) \ ({ \ static const struct trace_print_flags_u64 __flags[] = \ { flag_array, { -1, NULL } }; \ trace_print_flags_seq_u64(p, delim, flag, __flags); \ }) #define __print_symbolic_u64(value, symbol_array...) \ ({ \ static const struct trace_print_flags_u64 symbols[] = \ { symbol_array, { -1, NULL } }; \ trace_print_symbols_seq_u64(p, value, symbols); \ }) #else #define __print_flags_u64(flag, delim, flag_array...) \ __print_flags(flag, delim, flag_array) #define __print_symbolic_u64(value, symbol_array...) \ __print_symbolic(value, symbol_array) #endif #undef __print_hex #define __print_hex(buf, buf_len) \ trace_print_hex_seq(p, buf, buf_len, false) #undef __print_hex_str #define __print_hex_str(buf, buf_len) \ trace_print_hex_seq(p, buf, buf_len, true) #undef __print_array #define __print_array(array, count, el_size) \ ({ \ BUILD_BUG_ON(el_size != 1 && el_size != 2 && \ el_size != 4 && el_size != 8); \ trace_print_array_seq(p, array, count, el_size); \ }) #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static notrace enum print_line_t \ trace_raw_output_##call(struct trace_iterator *iter, int flags, \ struct trace_event *trace_event) \ { \ struct trace_seq *s = &iter->seq; \ struct trace_seq __maybe_unused *p = &iter->tmp_seq; \ struct trace_event_raw_##call *field; \ int ret; \ \ field = (typeof(field))iter->ent; \ \ ret = trace_raw_output_prep(iter, trace_event); \ if (ret != TRACE_TYPE_HANDLED) \ return ret; \ \ trace_seq_printf(s, print); \ \ return trace_handle_return(s); \ } \ static struct trace_event_functions trace_event_type_funcs_##call = { \ .trace = trace_raw_output_##call, \ }; #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ static notrace enum print_line_t \ trace_raw_output_##call(struct trace_iterator *iter, int flags, \ struct trace_event *event) \ { \ struct trace_event_raw_##template *field; \ struct trace_entry *entry; \ struct trace_seq *p = &iter->tmp_seq; \ \ entry = iter->ent; \ \ if (entry->type != event_##call.event.type) { \ WARN_ON_ONCE(1); \ return TRACE_TYPE_UNHANDLED; \ } \ \ field = (typeof(field))entry; \ \ trace_seq_init(p); \ return trace_output_call(iter, #call, print); \ } \ static struct trace_event_functions trace_event_type_funcs_##call = { \ .trace = trace_raw_output_##call, \ }; #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) #undef 
__field_ext #define __field_ext(type, item, filter_type) \ ret = trace_define_field(event_call, #type, #item, \ offsetof(typeof(field), item), \ sizeof(field.item), \ is_signed_type(type), filter_type); \ if (ret) \ return ret; #undef __field_struct_ext #define __field_struct_ext(type, item, filter_type) \ ret = trace_define_field(event_call, #type, #item, \ offsetof(typeof(field), item), \ sizeof(field.item), \ 0, filter_type); \ if (ret) \ return ret; #undef __field #define __field(type, item) __field_ext(type, item, FILTER_OTHER) #undef __field_struct #define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER) #undef __array #define __array(type, item, len) \ do { \ char *type_str = #type"["__stringify(len)"]"; \ BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ ret = trace_define_field(event_call, type_str, #item, \ offsetof(typeof(field), item), \ sizeof(field.item), \ is_signed_type(type), FILTER_OTHER); \ if (ret) \ return ret; \ } while (0); #undef __dynamic_array #define __dynamic_array(type, item, len) \ ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \ offsetof(typeof(field), __data_loc_##item), \ sizeof(field.__data_loc_##item), \ is_signed_type(type), FILTER_OTHER); #undef __string #define __string(item, src) __dynamic_array(char, item, -1) #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ static int notrace __init \ trace_event_define_fields_##call(struct trace_event_call *event_call) \ { \ struct trace_event_raw_##call field; \ int ret; \ \ tstruct; \ \ return ret; \ } #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* * remember the offset of each array from the beginning of the event. */ #undef __entry #define __entry entry #undef __field #define __field(type, item) #undef __field_ext #define __field_ext(type, item, filter_type) #undef __field_struct #define __field_struct(type, item) #undef __field_struct_ext #define __field_struct_ext(type, item, filter_type) #undef __array #define __array(type, item, len) #undef __dynamic_array #define __dynamic_array(type, item, len) \ __item_length = (len) * sizeof(type); \ __data_offsets->item = __data_size + \ offsetof(typeof(*entry), __data); \ __data_offsets->item |= __item_length << 16; \ __data_size += __item_length; #undef __string #define __string(item, src) __dynamic_array(char, item, \ strlen((src) ? (const char *)(src) : "(null)") + 1) /* * __bitmask_size_in_bytes_raw is the number of bytes needed to hold * num_possible_cpus(). */ #define __bitmask_size_in_bytes_raw(nr_bits) \ (((nr_bits) + 7) / 8) #define __bitmask_size_in_longs(nr_bits) \ ((__bitmask_size_in_bytes_raw(nr_bits) + \ ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8)) /* * __bitmask_size_in_bytes is the number of bytes needed to hold * num_possible_cpus() padded out to the nearest long. This is what * is saved in the buffer, just to be consistent. 
*/ #define __bitmask_size_in_bytes(nr_bits) \ (__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8)) #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, \ __bitmask_size_in_longs(nr_bits)) #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static inline notrace int trace_event_get_offsets_##call( \ struct trace_event_data_offsets_##call *__data_offsets, proto) \ { \ int __data_size = 0; \ int __maybe_unused __item_length; \ struct trace_event_raw_##call __maybe_unused *entry; \ \ tstruct; \ \ return __data_size; \ } #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* * Stage 4 of the trace events. * * Override the macros in <trace/trace_events.h> to include the following: * * For those macros defined with TRACE_EVENT: * * static struct trace_event_call event_<call>; * * static void trace_event_raw_event_<call>(void *__data, proto) * { * struct trace_event_file *trace_file = __data; * struct trace_event_call *event_call = trace_file->event_call; * struct trace_event_data_offsets_<call> __maybe_unused __data_offsets; * unsigned long eflags = trace_file->flags; * enum event_trigger_type __tt = ETT_NONE; * struct ring_buffer_event *event; * struct trace_event_raw_<call> *entry; <-- defined in stage 1 * struct ring_buffer *buffer; * unsigned long irq_flags; * int __data_size; * int pc; * * if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) { * if (eflags & EVENT_FILE_FL_TRIGGER_MODE) * event_triggers_call(trace_file, NULL); * if (eflags & EVENT_FILE_FL_SOFT_DISABLED) * return; * } * * local_save_flags(irq_flags); * pc = preempt_count(); * * __data_size = trace_event_get_offsets_<call>(&__data_offsets, args); * * event = trace_event_buffer_lock_reserve(&buffer, trace_file, * event_<call>->event.type, * sizeof(*entry) + __data_size, * irq_flags, pc); * if (!event) * return; * entry = ring_buffer_event_data(event); * * { <assign>; } <-- Here we assign the entries by the __field and * __array macros. * * if (eflags & EVENT_FILE_FL_TRIGGER_COND) * __tt = event_triggers_call(trace_file, entry); * * if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, * &trace_file->flags)) * ring_buffer_discard_commit(buffer, event); * else if (!filter_check_discard(trace_file, entry, buffer, event)) * trace_buffer_unlock_commit(buffer, event, irq_flags, pc); * * if (__tt) * event_triggers_post_call(trace_file, __tt); * } * * static struct trace_event ftrace_event_type_<call> = { * .trace = trace_raw_output_<call>, <-- stage 2 * }; * * static char print_fmt_<call>[] = <TP_printk>; * * static struct trace_event_class __used event_class_<template> = { * .system = "<system>", * .define_fields = trace_event_define_fields_<call>, * .fields = LIST_HEAD_INIT(event_class_##call.fields), * .raw_init = trace_event_raw_init, * .probe = trace_event_raw_event_##call, * .reg = trace_event_reg, * }; * * static struct trace_event_call event_<call> = { * .class = event_class_<template>, * { * .tp = &__tracepoint_<call>, * }, * .event = &ftrace_event_type_<call>, * .print_fmt = print_fmt_<call>, * .flags = TRACE_EVENT_FL_TRACEPOINT, * }; * // its only safe to use pointers when doing linker tricks to * // create an array. 
* static struct trace_event_call __used * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>; * */ #ifdef CONFIG_PERF_EVENTS #define _TRACE_PERF_PROTO(call, proto) \ static notrace void \ perf_trace_##call(void *__data, proto); #define _TRACE_PERF_INIT(call) \ .perf_probe = perf_trace_##call, #else #define _TRACE_PERF_PROTO(call, proto) #define _TRACE_PERF_INIT(call) #endif /* CONFIG_PERF_EVENTS */ #undef __entry #define __entry entry #undef __field #define __field(type, item) #undef __field_struct #define __field_struct(type, item) #undef __array #define __array(type, item, len) #undef __dynamic_array #define __dynamic_array(type, item, len) \ __entry->__data_loc_##item = __data_offsets.item; #undef __string #define __string(item, src) __dynamic_array(char, item, -1) #undef __assign_str #define __assign_str(dst, src) \ strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)"); #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) #undef __get_bitmask #define __get_bitmask(field) (char *)__get_dynamic_array(field) #undef __assign_bitmask #define __assign_bitmask(dst, src, nr_bits) \ memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits)) #undef TP_fast_assign #define TP_fast_assign(args...) args #undef __perf_count #define __perf_count(c) (c) #undef __perf_task #define __perf_task(t) (t) #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ \ static notrace void \ trace_event_raw_event_##call(void *__data, proto) \ { \ struct trace_event_file *trace_file = __data; \ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\ struct trace_event_buffer fbuffer; \ struct trace_event_raw_##call *entry; \ int __data_size; \ \ if (trace_trigger_soft_disabled(trace_file)) \ return; \ \ __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \ \ entry = trace_event_buffer_reserve(&fbuffer, trace_file, \ sizeof(*entry) + __data_size); \ \ if (!entry) \ return; \ \ tstruct \ \ { assign; } \ \ trace_event_buffer_commit(&fbuffer); \ } /* * The ftrace_test_probe is compiled out, it is only here as a build time check * to make sure that if the tracepoint handling changes, the ftrace probe will * fail to compile unless it too is updated. */ #undef DEFINE_EVENT #define DEFINE_EVENT(template, call, proto, args) \ static inline void ftrace_test_probe_##call(void) \ { \ check_trace_callback_type_##call(trace_event_raw_event_##template); \ } #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, name, proto, args, print) #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) #undef __entry #define __entry REC #undef __print_flags #undef __print_symbolic #undef __print_hex #undef __print_hex_str #undef __get_dynamic_array #undef __get_dynamic_array_len #undef __get_str #undef __get_bitmask #undef __print_array #undef TP_printk #define TP_printk(fmt, args...) 
"\"" fmt "\", " __stringify(args) #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ _TRACE_PERF_PROTO(call, PARAMS(proto)); \ static char print_fmt_##call[] = print; \ static struct trace_event_class __used __refdata event_class_##call = { \ .system = TRACE_SYSTEM_STRING, \ .define_fields = trace_event_define_fields_##call, \ .fields = LIST_HEAD_INIT(event_class_##call.fields),\ .raw_init = trace_event_raw_init, \ .probe = trace_event_raw_event_##call, \ .reg = trace_event_reg, \ _TRACE_PERF_INIT(call) \ }; #undef DEFINE_EVENT #define DEFINE_EVENT(template, call, proto, args) \ \ static struct trace_event_call __used event_##call = { \ .class = &event_class_##template, \ { \ .tp = &__tracepoint_##call, \ }, \ .event.funcs = &trace_event_type_funcs_##template, \ .print_fmt = print_fmt_##template, \ .flags = TRACE_EVENT_FL_TRACEPOINT, \ }; \ static struct trace_event_call __used \ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ \ static char print_fmt_##call[] = print; \ \ static struct trace_event_call __used event_##call = { \ .class = &event_class_##template, \ { \ .tp = &__tracepoint_##call, \ }, \ .event.funcs = &trace_event_type_funcs_##call, \ .print_fmt = print_fmt_##call, \ .flags = TRACE_EVENT_FL_TRACEPOINT, \ }; \ static struct trace_event_call __used \ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
null
null
null
null
113,295
66,076
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
66,076
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <algorithm> #include "base/test/trace_event_analyzer.h" #include "chrome/browser/media/webrtc/webrtc_browsertest_base.h" #include "chrome/browser/media/webrtc/webrtc_browsertest_common.h" #include "chrome/browser/ui/browser.h" #include "chrome/browser/ui/browser_tabstrip.h" #include "chrome/test/base/tracing.h" #include "content/public/browser/render_process_host.h" #include "content/public/browser/render_view_host.h" #include "content/public/common/content_switches.h" #include "content/public/common/feature_h264_with_openh264_ffmpeg.h" #include "media/base/media_switches.h" #include "net/test/embedded_test_server/embedded_test_server.h" #include "testing/perf/perf_test.h" #include "ui/gl/gl_switches.h" using trace_analyzer::TraceEvent; using trace_analyzer::TraceEventVector; using trace_analyzer::Query; namespace { // Trace events static const char kStartRenderEventName[] = "RemoteVideoSourceDelegate::RenderFrame"; static const char kEnqueueFrameEventName[] = "WebMediaPlayerMSCompositor::EnqueueFrame"; static const char kSetFrameEventName[] = "WebMediaPlayerMSCompositor::SetCurrentFrame"; static const char kGetFrameEventName[] = "WebMediaPlayerMSCompositor::GetCurrentFrame"; static const char kVideoResourceEventName[] = "VideoResourceUpdater::ObtainFrameResources"; static const char kVsyncEventName[] = "Display::DrawAndSwap"; static const char kEventMatchKey[] = "Timestamp"; static const char kTestResultString[] = "TestVideoDisplayPerf"; static const char kMainWebrtcTestHtmlPage[] = "/webrtc/webrtc_video_display_perf_test.html"; static const struct VideoDisplayPerfTestConfig { int width; int height; int fps; } kVideoConfigurations[] = {{1280, 720, 30}, {1280, 720, 60}, {1920, 1080, 30}, {1920, 1080, 60}}; void CalculateMeanAndMax(const std::vector<double>& inputs, double* mean, double* std_dev, double* max) { double sum = 0.0; double sqr_sum = 0.0; double max_so_far = 0.0; size_t count = inputs.size(); for (const auto& input : inputs) { sum += input; sqr_sum += input * input; max_so_far = std::max(input, max_so_far); } *max = max_so_far; *mean = sum / count; *std_dev = sqrt(std::max(0.0, count * sqr_sum - sum * sum)) / count; } void PrintMeanAndMax(const std::string& var_name, const std::string& name_modifier, const std::vector<double>& vars) { double mean = 0.0; double std_dev = 0.0; double max = 0.0; CalculateMeanAndMax(vars, &mean, &std_dev, &max); perf_test::PrintResultMeanAndError( kTestResultString, name_modifier, var_name + " Mean", base::StringPrintf("%.0lf,%.0lf", mean, std_dev), "μs", true); perf_test::PrintResult(kTestResultString, name_modifier, var_name + " Max", base::StringPrintf("%.0lf", max), "μs", true); } void FindEvents(trace_analyzer::TraceAnalyzer* analyzer, const std::string& event_name, const Query& base_query, TraceEventVector* events) { Query query = Query::EventNameIs(event_name) && base_query; analyzer->FindEvents(query, events); } void AssociateEvents(trace_analyzer::TraceAnalyzer* analyzer, const std::vector<std::string>& event_names, const std::string& match_string, const Query& base_query) { for (size_t i = 0; i < event_names.size() - 1; ++i) { Query begin = Query::EventNameIs(event_names[i]); Query end = Query::EventNameIs(event_names[i + 1]); Query match(Query::EventArg(match_string) == Query::OtherArg(match_string)); analyzer->AssociateEvents(begin, end, base_query && match); } } } 
// anonymous namespace // Tests the performance of Chrome displaying remote video. // // This test creates a WebRTC peer connection between two tabs and measures the // trace events listed in the beginning of this file on the tab receiving // remote video. In order to cut down from the encode cost, the tab receiving // remote video does not send any video to its peer. // // This test traces certain categories for a period of time. It follows the // lifetime of a single video frame by synchronizing on the timestamps values // attached to trace events. Then, it calculates the duration and related stats. class WebRtcVideoDisplayPerfBrowserTest : public WebRtcTestBase, public testing::WithParamInterface<VideoDisplayPerfTestConfig> { public: WebRtcVideoDisplayPerfBrowserTest() { test_config_ = GetParam(); } void SetUpInProcessBrowserTestFixture() override { DetectErrorsInJavaScript(); } void SetUpCommandLine(base::CommandLine* command_line) override { command_line->AppendSwitch(switches::kUseFakeUIForMediaStream); command_line->AppendSwitchASCII( switches::kUseFakeDeviceForMediaStream, base::StringPrintf("fps=%d", test_config_.fps)); command_line->AppendSwitch(switches::kUseGpuInTests); } void TestVideoDisplayPerf(const std::string& video_codec) { ASSERT_TRUE(embedded_test_server()->Start()); content::WebContents* left_tab = OpenPageAndGetUserMediaInNewTabWithConstraints( embedded_test_server()->GetURL(kMainWebrtcTestHtmlPage), base::StringPrintf( "{audio: true, video: {mandatory: {minWidth: %d, maxWidth: %d, " "minHeight: %d, maxHeight: %d}}}", test_config_.width, test_config_.width, test_config_.height, test_config_.height)); content::WebContents* right_tab = OpenPageAndGetUserMediaInNewTabWithConstraints( embedded_test_server()->GetURL(kMainWebrtcTestHtmlPage), "{audio: true, video: false}"); const int process_id = base::GetProcId( right_tab->GetRenderViewHost()->GetProcess()->GetHandle()); const std::string disable_cpu_adaptation_constraint( "{'optional': [{'googCpuOveruseDetection': false}]}"); SetupPeerconnectionWithConstraintsAndLocalStream( left_tab, disable_cpu_adaptation_constraint); SetupPeerconnectionWithConstraintsAndLocalStream( right_tab, disable_cpu_adaptation_constraint); if (!video_codec.empty()) { SetDefaultVideoCodec(left_tab, video_codec, true /*prefer_hw_video_codec*/); SetDefaultVideoCodec(right_tab, video_codec, true /*prefer_hw_video_codec*/); } NegotiateCall(left_tab, right_tab); StartDetectingVideo(right_tab, "remote-view"); WaitForVideoToPlay(right_tab); // Run the connection a bit to ramp up. test::SleepInJavascript(left_tab, 10000); ASSERT_TRUE(tracing::BeginTracing("media,viz,webrtc")); // Run the connection for 5 seconds to collect metrics. 
test::SleepInJavascript(left_tab, 5000); std::string json_events; ASSERT_TRUE(tracing::EndTracing(&json_events)); std::unique_ptr<trace_analyzer::TraceAnalyzer> analyzer( trace_analyzer::TraceAnalyzer::Create(json_events)); HangUp(left_tab); HangUp(right_tab); chrome::CloseWebContents(browser(), left_tab, false); chrome::CloseWebContents(browser(), right_tab, false); ASSERT_TRUE(CalculatePerfResults(analyzer.get(), process_id)); PrintResults(video_codec); } private: bool CalculatePerfResults(trace_analyzer::TraceAnalyzer* analyzer, int render_process_id) { Query match_process_id = Query::EventPidIs(render_process_id); const std::vector<std::string> chain_of_events = { kStartRenderEventName, kEnqueueFrameEventName, kSetFrameEventName, kGetFrameEventName, kVideoResourceEventName}; AssociateEvents(analyzer, chain_of_events, kEventMatchKey, match_process_id); TraceEventVector start_render_events; FindEvents(analyzer, kStartRenderEventName, match_process_id, &start_render_events); if (start_render_events.empty()) return false; // We are only interested in vsync events coming after the first render // event. Earlier ones are already missed. Query after_first_render_event = Query::EventTime() > Query::Double(start_render_events.front()->timestamp); TraceEventVector vsync_events; FindEvents(analyzer, kVsyncEventName, after_first_render_event, &vsync_events); if (vsync_events.empty()) return false; size_t found_vsync_index = 0; size_t skipped_frame_count = 0; for (const auto* event : start_render_events) { const double start = event->timestamp; const TraceEvent* enqueue_frame_event = event->other_event; if (!enqueue_frame_event) { skipped_frame_count++; continue; } const double enqueue_frame_duration = enqueue_frame_event->timestamp - start; const TraceEvent* set_frame_event = enqueue_frame_event->other_event; if (!set_frame_event) { skipped_frame_count++; continue; } const double set_frame_duration = set_frame_event->timestamp - enqueue_frame_event->timestamp; const TraceEvent* get_frame_event = set_frame_event->other_event; if (!get_frame_event) { skipped_frame_count++; continue; } const double get_frame_duration = get_frame_event->timestamp - set_frame_event->timestamp; const TraceEvent* video_resource_event = get_frame_event->other_event; if (!video_resource_event) { skipped_frame_count++; continue; } const double resource_ready_duration = video_resource_event->timestamp - get_frame_event->timestamp; // We try to find the closest vsync event after video resource is ready. const bool found_vsync = FindFirstOf( vsync_events, Query::EventTime() > Query::Double(video_resource_event->timestamp + video_resource_event->duration), found_vsync_index, &found_vsync_index); if (!found_vsync) { skipped_frame_count++; continue; } const double vsync_duration = vsync_events[found_vsync_index]->timestamp - video_resource_event->timestamp; const double total_duration = vsync_events[found_vsync_index]->timestamp - start; enqueue_frame_durations_.push_back(enqueue_frame_duration); set_frame_durations_.push_back(set_frame_duration); get_frame_durations_.push_back(get_frame_duration); resource_ready_durations_.push_back(resource_ready_duration); vsync_durations_.push_back(vsync_duration); total_controlled_durations_.push_back(total_duration - set_frame_duration); total_durations_.push_back(total_duration); } if (start_render_events.size() == skipped_frame_count) return false; // Calculate the percentage by dividing by the number of frames received. 
skipped_frame_percentage_ = 100.0 * skipped_frame_count / start_render_events.size(); return true; } void PrintResults(const std::string& video_codec) { std::string name_modifier = base::StringPrintf("%s_%dp%df", video_codec.c_str(), test_config_.height, test_config_.fps); perf_test::PrintResult( kTestResultString, name_modifier, "Skipped frames", base::StringPrintf("%.2lf", skipped_frame_percentage_), "percent", true); // We identify intervals in a way that can help us easily bisect the source // of added latency in case of a regression. From these intervals, "Render // Algorithm" can take random amount of times based on the vsync cycle it is // closest to. Therefore, "Total Controlled Latency" refers to the total // times without that section for semi-consistent results. PrintMeanAndMax("Passing to Render Algorithm Latency", name_modifier, enqueue_frame_durations_); PrintMeanAndMax("Render Algorithm Latency", name_modifier, set_frame_durations_); PrintMeanAndMax("Compositor Picking Frame Latency", name_modifier, get_frame_durations_); PrintMeanAndMax("Compositor Resource Preparation Latency", name_modifier, resource_ready_durations_); PrintMeanAndMax("Vsync Latency", name_modifier, vsync_durations_); PrintMeanAndMax("Total Controlled Latency", name_modifier, total_controlled_durations_); PrintMeanAndMax("Total Latency", name_modifier, total_durations_); } VideoDisplayPerfTestConfig test_config_; // Containers for test results. double skipped_frame_percentage_ = 0; std::vector<double> enqueue_frame_durations_; std::vector<double> set_frame_durations_; std::vector<double> get_frame_durations_; std::vector<double> resource_ready_durations_; std::vector<double> vsync_durations_; std::vector<double> total_controlled_durations_; std::vector<double> total_durations_; }; INSTANTIATE_TEST_CASE_P(WebRtcVideoDisplayPerfBrowserTests, WebRtcVideoDisplayPerfBrowserTest, testing::ValuesIn(kVideoConfigurations)); IN_PROC_BROWSER_TEST_P(WebRtcVideoDisplayPerfBrowserTest, MANUAL_TestVideoDisplayPerfVP9) { TestVideoDisplayPerf("VP9"); } #if BUILDFLAG(RTC_USE_H264) IN_PROC_BROWSER_TEST_P(WebRtcVideoDisplayPerfBrowserTest, MANUAL_TestVideoDisplayPerfH264) { if (!base::FeatureList::IsEnabled(content::kWebRtcH264WithOpenH264FFmpeg)) { LOG(WARNING) << "Run-time feature WebRTC-H264WithOpenH264FFmpeg disabled. " "Skipping WebRtcVideoDisplayPerfBrowserTest.MANUAL_" "TestVideoDisplayPerfH264 " "(test \"OK\")"; return; } TestVideoDisplayPerf("H264"); } #endif // BUILDFLAG(RTC_USE_H264)
null
null
null
null
62,939
25,923
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
190,918
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * SMU_7_0_0 Register documentation * * Copyright (C) 2014 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef SMU_7_0_0_SH_MASK_H #define SMU_7_0_0_SH_MASK_H #define GCK_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff #define GCK_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0 #define GCK_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff #define GCK_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0 #define CG_DCLK_CNTL__DCLK_DIVIDER_MASK 0x7f #define CG_DCLK_CNTL__DCLK_DIVIDER__SHIFT 0x0 #define CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK 0x100 #define CG_DCLK_CNTL__DCLK_DIR_CNTL_EN__SHIFT 0x8 #define CG_DCLK_CNTL__DCLK_DIR_CNTL_TOG_MASK 0x200 #define CG_DCLK_CNTL__DCLK_DIR_CNTL_TOG__SHIFT 0x9 #define CG_DCLK_CNTL__DCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00 #define CG_DCLK_CNTL__DCLK_DIR_CNTL_DIVIDER__SHIFT 0xa #define CG_DCLK_STATUS__DCLK_STATUS_MASK 0x1 #define CG_DCLK_STATUS__DCLK_STATUS__SHIFT 0x0 #define CG_DCLK_STATUS__DCLK_DIR_CNTL_DONETOG_MASK 0x2 #define CG_DCLK_STATUS__DCLK_DIR_CNTL_DONETOG__SHIFT 0x1 #define CG_VCLK_CNTL__VCLK_DIVIDER_MASK 0x7f #define CG_VCLK_CNTL__VCLK_DIVIDER__SHIFT 0x0 #define CG_VCLK_CNTL__VCLK_DIR_CNTL_EN_MASK 0x100 #define CG_VCLK_CNTL__VCLK_DIR_CNTL_EN__SHIFT 0x8 #define CG_VCLK_CNTL__VCLK_DIR_CNTL_TOG_MASK 0x200 #define CG_VCLK_CNTL__VCLK_DIR_CNTL_TOG__SHIFT 0x9 #define CG_VCLK_CNTL__VCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00 #define CG_VCLK_CNTL__VCLK_DIR_CNTL_DIVIDER__SHIFT 0xa #define CG_VCLK_STATUS__VCLK_STATUS_MASK 0x1 #define CG_VCLK_STATUS__VCLK_STATUS__SHIFT 0x0 #define CG_VCLK_STATUS__VCLK_DIR_CNTL_DONETOG_MASK 0x2 #define CG_VCLK_STATUS__VCLK_DIR_CNTL_DONETOG__SHIFT 0x1 #define CG_ECLK_CNTL__ECLK_DIVIDER_MASK 0x7f #define CG_ECLK_CNTL__ECLK_DIVIDER__SHIFT 0x0 #define CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK 0x100 #define CG_ECLK_CNTL__ECLK_DIR_CNTL_EN__SHIFT 0x8 #define CG_ECLK_CNTL__ECLK_DIR_CNTL_TOG_MASK 0x200 #define CG_ECLK_CNTL__ECLK_DIR_CNTL_TOG__SHIFT 0x9 #define CG_ECLK_CNTL__ECLK_DIR_CNTL_DIVIDER_MASK 0x1fc00 #define CG_ECLK_CNTL__ECLK_DIR_CNTL_DIVIDER__SHIFT 0xa #define CG_ECLK_STATUS__ECLK_STATUS_MASK 0x1 #define CG_ECLK_STATUS__ECLK_STATUS__SHIFT 0x0 #define CG_ECLK_STATUS__ECLK_DIR_CNTL_DONETOG_MASK 0x2 #define CG_ECLK_STATUS__ECLK_DIR_CNTL_DONETOG__SHIFT 0x1 #define CG_ACLK_CNTL__ACLK_DIVIDER_MASK 0x7f #define CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT 0x0 #define CG_ACLK_CNTL__ACLK_DIR_CNTL_EN_MASK 0x100 #define CG_ACLK_CNTL__ACLK_DIR_CNTL_EN__SHIFT 0x8 #define CG_ACLK_CNTL__ACLK_DIR_CNTL_TOG_MASK 0x200 #define CG_ACLK_CNTL__ACLK_DIR_CNTL_TOG__SHIFT 0x9 #define 
CG_ACLK_CNTL__ACLK_DIR_CNTL_DIVIDER_MASK 0x1fc00 #define CG_ACLK_CNTL__ACLK_DIR_CNTL_DIVIDER__SHIFT 0xa #define GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK 0x1 #define GCK_DFS_BYPASS_CNTL__BYPASSECLK__SHIFT 0x0 #define GCK_DFS_BYPASS_CNTL__BYPASSLCLK_MASK 0x2 #define GCK_DFS_BYPASS_CNTL__BYPASSLCLK__SHIFT 0x1 #define GCK_DFS_BYPASS_CNTL__BYPASSEVCLK_MASK 0x4 #define GCK_DFS_BYPASS_CNTL__BYPASSEVCLK__SHIFT 0x2 #define GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK 0x8 #define GCK_DFS_BYPASS_CNTL__BYPASSDCLK__SHIFT 0x3 #define GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK 0x10 #define GCK_DFS_BYPASS_CNTL__BYPASSVCLK__SHIFT 0x4 #define GCK_DFS_BYPASS_CNTL__BYPASSDISPCLK_MASK 0x20 #define GCK_DFS_BYPASS_CNTL__BYPASSDISPCLK__SHIFT 0x5 #define GCK_DFS_BYPASS_CNTL__BYPASSDPREFCLK_MASK 0x40 #define GCK_DFS_BYPASS_CNTL__BYPASSDPREFCLK__SHIFT 0x6 #define GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK 0x80 #define GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT 0x7 #define GCK_DFS_BYPASS_CNTL__BYPASSADIVCLK_MASK 0x100 #define GCK_DFS_BYPASS_CNTL__BYPASSADIVCLK__SHIFT 0x8 #define GCK_DFS_BYPASS_CNTL__BYPASSPSPCLK_MASK 0x200 #define GCK_DFS_BYPASS_CNTL__BYPASSPSPCLK__SHIFT 0x9 #define GCK_DFS_BYPASS_CNTL__BYPASSSAMCLK_MASK 0x400 #define GCK_DFS_BYPASS_CNTL__BYPASSSAMCLK__SHIFT 0xa #define GCK_DFS_BYPASS_CNTL__BYPASSSCLK_MASK 0x800 #define GCK_DFS_BYPASS_CNTL__BYPASSSCLK__SHIFT 0xb #define GCK_DFS_BYPASS_CNTL__USE_SPLL_BYPASS_EN_MASK 0x1000 #define GCK_DFS_BYPASS_CNTL__USE_SPLL_BYPASS_EN__SHIFT 0xc #define CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK 0x1 #define CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT 0x0 #define CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK 0x2 #define CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT 0x1 #define CG_SPLL_FUNC_CNTL__SPLL_DIVEN_MASK 0x4 #define CG_SPLL_FUNC_CNTL__SPLL_DIVEN__SHIFT 0x2 #define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK 0x8 #define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT 0x3 #define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_THRU_DFS_MASK 0x10 #define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_THRU_DFS__SHIFT 0x4 #define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x7e0 #define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT 0x5 #define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_UPDATE_MASK 0x800 #define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_UPDATE__SHIFT 0xb #define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_EN_MASK 0x1000 #define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_EN__SHIFT 0xc #define CG_SPLL_FUNC_CNTL__SPLL_BG_PWRON_MASK 0x2000 #define CG_SPLL_FUNC_CNTL__SPLL_BG_PWRON__SHIFT 0xd #define CG_SPLL_FUNC_CNTL__SPLL_BGADJ_MASK 0x3c000 #define CG_SPLL_FUNC_CNTL__SPLL_BGADJ__SHIFT 0xe #define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK 0x1fc0000 #define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT 0x12 #define CG_SPLL_FUNC_CNTL__SPLL_REG_BIAS_MASK 0xe000000 #define CG_SPLL_FUNC_CNTL__SPLL_REG_BIAS__SHIFT 0x19 #define CG_SPLL_FUNC_CNTL__SPLL_OTEST_LOCK_EN_MASK 0x10000000 #define CG_SPLL_FUNC_CNTL__SPLL_OTEST_LOCK_EN__SHIFT 0x1c #define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK 0x1ff #define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT 0x0 #define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_MASK 0x800 #define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ__SHIFT 0xb #define CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK 0x400000 #define CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT 0x16 #define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK 0x800000 #define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT 0x17 #define CG_SPLL_FUNC_CNTL_2__SPLL_RESET_CHG_MASK 0x1000000 #define CG_SPLL_FUNC_CNTL_2__SPLL_RESET_CHG__SHIFT 0x18 #define CG_SPLL_FUNC_CNTL_2__SPLL_BABY_STEP_CHG_MASK 0x2000000 #define CG_SPLL_FUNC_CNTL_2__SPLL_BABY_STEP_CHG__SHIFT 0x19 #define 
CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK 0x4000000 #define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE__SHIFT 0x1a #define CG_SPLL_FUNC_CNTL_2__SPLL_UNLOCK_CLEAR_MASK 0x8000000 #define CG_SPLL_FUNC_CNTL_2__SPLL_UNLOCK_CLEAR__SHIFT 0x1b #define CG_SPLL_FUNC_CNTL_2__SPLL_CLKF_UPDATE_MASK 0x10000000 #define CG_SPLL_FUNC_CNTL_2__SPLL_CLKF_UPDATE__SHIFT 0x1c #define CG_SPLL_FUNC_CNTL_2__SPLL_TEST_UNLOCK_CLR_MASK 0x40000000 #define CG_SPLL_FUNC_CNTL_2__SPLL_TEST_UNLOCK_CLR__SHIFT 0x1e #define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x3ffffff #define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x0 #define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK 0x10000000 #define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN__SHIFT 0x1c #define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_TEST_SEL_MASK 0xf #define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_TEST_SEL__SHIFT 0x0 #define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_SEL_MASK 0x60 #define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_SEL__SHIFT 0x5 #define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EN_MASK 0x180 #define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EN__SHIFT 0x7 #define CG_SPLL_FUNC_CNTL_4__SPLL_SSAMP_EN_MASK 0x200 #define CG_SPLL_FUNC_CNTL_4__SPLL_SSAMP_EN__SHIFT 0x9 #define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK 0x7fc00 #define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE__SHIFT 0xa #define CG_SPLL_FUNC_CNTL_4__TEST_FRAC_BYPASS_MASK 0x200000 #define CG_SPLL_FUNC_CNTL_4__TEST_FRAC_BYPASS__SHIFT 0x15 #define CG_SPLL_FUNC_CNTL_4__SPLL_ILOCK_MASK 0x800000 #define CG_SPLL_FUNC_CNTL_4__SPLL_ILOCK__SHIFT 0x17 #define CG_SPLL_FUNC_CNTL_4__SPLL_FBCLK_SEL_MASK 0x1000000 #define CG_SPLL_FUNC_CNTL_4__SPLL_FBCLK_SEL__SHIFT 0x18 #define CG_SPLL_FUNC_CNTL_4__SPLL_VCTRLADC_EN_MASK 0x2000000 #define CG_SPLL_FUNC_CNTL_4__SPLL_VCTRLADC_EN__SHIFT 0x19 #define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_MASK 0xc000000 #define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT__SHIFT 0x1a #define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_EXT_MASK 0x70000000 #define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_EXT__SHIFT 0x1c #define CG_SPLL_FUNC_CNTL_4__SPLL_VTOI_BIAS_CNTL_MASK 0x80000000 #define CG_SPLL_FUNC_CNTL_4__SPLL_VTOI_BIAS_CNTL__SHIFT 0x1f #define CG_SPLL_FUNC_CNTL_5__FBDIV_SSC_BYPASS_MASK 0x1 #define CG_SPLL_FUNC_CNTL_5__FBDIV_SSC_BYPASS__SHIFT 0x0 #define CG_SPLL_FUNC_CNTL_5__RISEFBVCO_EN_MASK 0x2 #define CG_SPLL_FUNC_CNTL_5__RISEFBVCO_EN__SHIFT 0x1 #define CG_SPLL_FUNC_CNTL_5__PFD_RESET_CNTRL_MASK 0xc #define CG_SPLL_FUNC_CNTL_5__PFD_RESET_CNTRL__SHIFT 0x2 #define CG_SPLL_FUNC_CNTL_5__RESET_TIMER_MASK 0x30 #define CG_SPLL_FUNC_CNTL_5__RESET_TIMER__SHIFT 0x4 #define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_CNTRL_MASK 0xc0 #define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_CNTRL__SHIFT 0x6 #define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_EN_MASK 0x100 #define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_EN__SHIFT 0x8 #define CG_SPLL_FUNC_CNTL_5__RESET_ANTI_MUX_MASK 0x200 #define CG_SPLL_FUNC_CNTL_5__RESET_ANTI_MUX__SHIFT 0x9 #define CG_SPLL_FUNC_CNTL_5__REFCLK_BYPASS_EN_MASK 0x400 #define CG_SPLL_FUNC_CNTL_5__REFCLK_BYPASS_EN__SHIFT 0xa #define CG_SPLL_FUNC_CNTL_5__PLLBYPASS_MASK 0x800 #define CG_SPLL_FUNC_CNTL_5__PLLBYPASS__SHIFT 0xb #define CG_SPLL_FUNC_CNTL_6__SCLKMUX0_CLKOFF_CNT_MASK 0xff #define CG_SPLL_FUNC_CNTL_6__SCLKMUX0_CLKOFF_CNT__SHIFT 0x0 #define CG_SPLL_FUNC_CNTL_6__SCLKMUX1_CLKOFF_CNT_MASK 0xff00 #define CG_SPLL_FUNC_CNTL_6__SCLKMUX1_CLKOFF_CNT__SHIFT 0x8 #define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_EN_MASK 0x10000 #define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_EN__SHIFT 0x10 #define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_IN_MASK 0x1e0000 #define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_IN__SHIFT 0x11 #define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_OUT_MASK 
0x1e00000 #define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_OUT__SHIFT 0x15 #define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR_MASK 0xfe000000 #define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19 #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0 #define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2 #define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV__SHIFT 0x1 #define SPLL_CNTL_MODE__SPLL_TEST_MASK 0x4 #define SPLL_CNTL_MODE__SPLL_TEST__SHIFT 0x2 #define SPLL_CNTL_MODE__SPLL_FASTEN_MASK 0x8 #define SPLL_CNTL_MODE__SPLL_FASTEN__SHIFT 0x3 #define SPLL_CNTL_MODE__SPLL_ENSAT_MASK 0x10 #define SPLL_CNTL_MODE__SPLL_ENSAT__SHIFT 0x4 #define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV_MASK 0xc00 #define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV__SHIFT 0xa #define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT_MASK 0xff000 #define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT__SHIFT 0xc #define SPLL_CNTL_MODE__SPLL_RESET_EN_MASK 0x10000000 #define SPLL_CNTL_MODE__SPLL_RESET_EN__SHIFT 0x1c #define SPLL_CNTL_MODE__SPLL_VCO_MODE_MASK 0x60000000 #define SPLL_CNTL_MODE__SPLL_VCO_MODE__SHIFT 0x1d #define CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK 0x1 #define CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT 0x0 #define CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK 0xfff0 #define CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT 0x4 #define CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK 0x3ffffff #define CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT 0x0 #define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK 0xff00 #define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT 0x8 #define CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK 0x2 #define CG_CLKPIN_CNTL__XTALIN_DIVIDE__SHIFT 0x1 #define CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK 0x4 #define CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT 0x2 #define CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK 0x1 #define CG_CLKPIN_CNTL_2__ENABLE_XCLK__SHIFT 0x0 #define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK 0x8 #define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT 0x3 #define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK 0x100 #define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK__SHIFT 0x8 #define CG_CLKPIN_CNTL_2__XO_IN_OSCIN_EN_MASK 0x4000 #define CG_CLKPIN_CNTL_2__XO_IN_OSCIN_EN__SHIFT 0xe #define CG_CLKPIN_CNTL_2__XO_IN_ICORE_CLK_OE_MASK 0x8000 #define CG_CLKPIN_CNTL_2__XO_IN_ICORE_CLK_OE__SHIFT 0xf #define CG_CLKPIN_CNTL_2__XO_IN_CML_RXEN_MASK 0x10000 #define CG_CLKPIN_CNTL_2__XO_IN_CML_RXEN__SHIFT 0x10 #define CG_CLKPIN_CNTL_2__XO_IN_BIDIR_CML_OE_MASK 0x20000 #define CG_CLKPIN_CNTL_2__XO_IN_BIDIR_CML_OE__SHIFT 0x11 #define CG_CLKPIN_CNTL_2__XO_IN2_OSCIN_EN_MASK 0x40000 #define CG_CLKPIN_CNTL_2__XO_IN2_OSCIN_EN__SHIFT 0x12 #define CG_CLKPIN_CNTL_2__XO_IN2_ICORE_CLK_OE_MASK 0x80000 #define CG_CLKPIN_CNTL_2__XO_IN2_ICORE_CLK_OE__SHIFT 0x13 #define CG_CLKPIN_CNTL_2__XO_IN2_CML_RXEN_MASK 0x100000 #define CG_CLKPIN_CNTL_2__XO_IN2_CML_RXEN__SHIFT 0x14 #define CG_CLKPIN_CNTL_2__XO_IN2_BIDIR_CML_OE_MASK 0x200000 #define CG_CLKPIN_CNTL_2__XO_IN2_BIDIR_CML_OE__SHIFT 0x15 #define CG_CLKPIN_CNTL_2__CML_CTRL_MASK 0xc00000 #define CG_CLKPIN_CNTL_2__CML_CTRL__SHIFT 0x16 #define CG_CLKPIN_CNTL_2__CLK_SPARE_MASK 0xff000000 #define CG_CLKPIN_CNTL_2__CLK_SPARE__SHIFT 0x18 #define THM_CLK_CNTL__CMON_CLK_SEL_MASK 0xff #define THM_CLK_CNTL__CMON_CLK_SEL__SHIFT 0x0 #define THM_CLK_CNTL__TMON_CLK_SEL_MASK 0xff00 #define THM_CLK_CNTL__TMON_CLK_SEL__SHIFT 0x8 #define THM_CLK_CNTL__CTF_CLK_SHUTOFF_EN_MASK 0x10000 #define THM_CLK_CNTL__CTF_CLK_SHUTOFF_EN__SHIFT 0x10 #define MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK 
0xff #define MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT 0x0 #define MISC_CLK_CTRL__ZCLK_SEL_MASK 0xff00 #define MISC_CLK_CTRL__ZCLK_SEL__SHIFT 0x8 #define MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK 0xff0000 #define MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT 0x10 #define GCK_PLL_TEST_CNTL__TST_SRC_SEL_MASK 0x1f #define GCK_PLL_TEST_CNTL__TST_SRC_SEL__SHIFT 0x0 #define GCK_PLL_TEST_CNTL__TST_REF_SEL_MASK 0x3e0 #define GCK_PLL_TEST_CNTL__TST_REF_SEL__SHIFT 0x5 #define GCK_PLL_TEST_CNTL__REF_TEST_COUNT_MASK 0x1fc00 #define GCK_PLL_TEST_CNTL__REF_TEST_COUNT__SHIFT 0xa #define GCK_PLL_TEST_CNTL__TST_RESET_MASK 0x20000 #define GCK_PLL_TEST_CNTL__TST_RESET__SHIFT 0x11 #define GCK_PLL_TEST_CNTL__TST_CLK_SEL_MODE_MASK 0x40000 #define GCK_PLL_TEST_CNTL__TST_CLK_SEL_MODE__SHIFT 0x12 #define GCK_PLL_TEST_CNTL_2__TEST_COUNT_MASK 0xfffe0000 #define GCK_PLL_TEST_CNTL_2__TEST_COUNT__SHIFT 0x11 #define GCK_ADFS_CLK_BYPASS_CNTL1__ECLK_BYPASS_CNTL_MASK 0x7 #define GCK_ADFS_CLK_BYPASS_CNTL1__ECLK_BYPASS_CNTL__SHIFT 0x0 #define GCK_ADFS_CLK_BYPASS_CNTL1__SCLK_BYPASS_CNTL_MASK 0x38 #define GCK_ADFS_CLK_BYPASS_CNTL1__SCLK_BYPASS_CNTL__SHIFT 0x3 #define GCK_ADFS_CLK_BYPASS_CNTL1__LCLK_BYPASS_CNTL_MASK 0x1c0 #define GCK_ADFS_CLK_BYPASS_CNTL1__LCLK_BYPASS_CNTL__SHIFT 0x6 #define GCK_ADFS_CLK_BYPASS_CNTL1__DCLK_BYPASS_CNTL_MASK 0xe00 #define GCK_ADFS_CLK_BYPASS_CNTL1__DCLK_BYPASS_CNTL__SHIFT 0x9 #define GCK_ADFS_CLK_BYPASS_CNTL1__VCLK_BYPASS_CNTL_MASK 0x7000 #define GCK_ADFS_CLK_BYPASS_CNTL1__VCLK_BYPASS_CNTL__SHIFT 0xc #define GCK_ADFS_CLK_BYPASS_CNTL1__DISPCLK_BYPASS_CNTL_MASK 0x38000 #define GCK_ADFS_CLK_BYPASS_CNTL1__DISPCLK_BYPASS_CNTL__SHIFT 0xf #define GCK_ADFS_CLK_BYPASS_CNTL1__DRREFCLK_BYPASS_CNTL_MASK 0x1c0000 #define GCK_ADFS_CLK_BYPASS_CNTL1__DRREFCLK_BYPASS_CNTL__SHIFT 0x12 #define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_BYPASS_CNTL_MASK 0xe00000 #define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_BYPASS_CNTL__SHIFT 0x15 #define GCK_ADFS_CLK_BYPASS_CNTL1__SAMCLK_BYPASS_CNTL_MASK 0x7000000 #define GCK_ADFS_CLK_BYPASS_CNTL1__SAMCLK_BYPASS_CNTL__SHIFT 0x18 #define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_DIV_BYPASS_CNTL_MASK 0x38000000 #define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_DIV_BYPASS_CNTL__SHIFT 0x1b #define SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff #define SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0 #define SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff #define SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0 #define SMC_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffff #define SMC_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x0 #define SMC_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffff #define SMC_IND_DATA_0__SMC_IND_DATA__SHIFT 0x0 #define SMC_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffff #define SMC_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x0 #define SMC_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffff #define SMC_IND_DATA_1__SMC_IND_DATA__SHIFT 0x0 #define SMC_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffff #define SMC_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x0 #define SMC_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffff #define SMC_IND_DATA_2__SMC_IND_DATA__SHIFT 0x0 #define SMC_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffff #define SMC_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x0 #define SMC_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffff #define SMC_IND_DATA_3__SMC_IND_DATA__SHIFT 0x0 #define SMC_IND_INDEX_4__SMC_IND_ADDR_MASK 0xffffffff #define SMC_IND_INDEX_4__SMC_IND_ADDR__SHIFT 0x0 #define SMC_IND_DATA_4__SMC_IND_DATA_MASK 0xffffffff #define SMC_IND_DATA_4__SMC_IND_DATA__SHIFT 0x0 #define SMC_IND_INDEX_5__SMC_IND_ADDR_MASK 0xffffffff #define SMC_IND_INDEX_5__SMC_IND_ADDR__SHIFT 0x0 #define SMC_IND_DATA_5__SMC_IND_DATA_MASK 
0xffffffff
#define SMC_IND_DATA_5__SMC_IND_DATA__SHIFT 0x0
#define SMC_IND_INDEX_6__SMC_IND_ADDR_MASK 0xffffffff
#define SMC_IND_INDEX_6__SMC_IND_ADDR__SHIFT 0x0
#define SMC_IND_DATA_6__SMC_IND_DATA_MASK 0xffffffff
#define SMC_IND_DATA_6__SMC_IND_DATA__SHIFT 0x0
#define SMC_IND_INDEX_7__SMC_IND_ADDR_MASK 0xffffffff
#define SMC_IND_INDEX_7__SMC_IND_ADDR__SHIFT 0x0
#define SMC_IND_DATA_7__SMC_IND_DATA_MASK 0xffffffff
#define SMC_IND_DATA_7__SMC_IND_DATA__SHIFT 0x0
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK 0x1
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0__SHIFT 0x0
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1_MASK 0x2
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1__SHIFT 0x1
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2_MASK 0x4
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2__SHIFT 0x2
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3_MASK 0x8
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3__SHIFT 0x3
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_4_MASK 0x10
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_4__SHIFT 0x4
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_5_MASK 0x20
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_5__SHIFT 0x5
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_6_MASK 0x40
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_6__SHIFT 0x6
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_7_MASK 0x80
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_7__SHIFT 0x7
#define SMC_MESSAGE_0__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_0__SMC_MSG__SHIFT 0x0
#define SMC_RESP_0__SMC_RESP_MASK 0xffff
#define SMC_RESP_0__SMC_RESP__SHIFT 0x0
#define SMC_MESSAGE_1__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_1__SMC_MSG__SHIFT 0x0
#define SMC_RESP_1__SMC_RESP_MASK 0xffff
#define SMC_RESP_1__SMC_RESP__SHIFT 0x0
#define SMC_MESSAGE_2__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_2__SMC_MSG__SHIFT 0x0
#define SMC_RESP_2__SMC_RESP_MASK 0xffff
#define SMC_RESP_2__SMC_RESP__SHIFT 0x0
#define SMC_MESSAGE_3__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_3__SMC_MSG__SHIFT 0x0
#define SMC_RESP_3__SMC_RESP_MASK 0xffff
#define SMC_RESP_3__SMC_RESP__SHIFT 0x0
#define SMC_MESSAGE_4__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_4__SMC_MSG__SHIFT 0x0
#define SMC_RESP_4__SMC_RESP_MASK 0xffff
#define SMC_RESP_4__SMC_RESP__SHIFT 0x0
#define SMC_MESSAGE_5__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_5__SMC_MSG__SHIFT 0x0
#define SMC_RESP_5__SMC_RESP_MASK 0xffff
#define SMC_RESP_5__SMC_RESP__SHIFT 0x0
#define SMC_MESSAGE_6__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_6__SMC_MSG__SHIFT 0x0
#define SMC_RESP_6__SMC_RESP_MASK 0xffff
#define SMC_RESP_6__SMC_RESP__SHIFT 0x0
#define SMC_MESSAGE_7__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_7__SMC_MSG__SHIFT 0x0
#define SMC_RESP_7__SMC_RESP_MASK 0xffff
#define SMC_RESP_7__SMC_RESP__SHIFT 0x0
#define SMC_MSG_ARG_0__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_0__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MSG_ARG_1__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_1__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MSG_ARG_2__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_2__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MSG_ARG_3__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_3__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MSG_ARG_4__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_4__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MSG_ARG_5__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_5__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MSG_ARG_6__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_6__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MSG_ARG_7__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_7__SMC_MSG_ARG__SHIFT 0x0
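/*
 * Editor's note -- illustrative sketch only, not part of the generated
 * register definitions.  Every field in this header is described by a
 * _MASK/_SHIFT pair, so a driver typically reads a field as
 * (reg & MASK) >> SHIFT and updates it as (reg & ~MASK) | ((val << SHIFT) & MASK).
 * The message-handshake example below assumes hypothetical MMIO accessors
 * smu_read32()/smu_write32(); the register offsets (normally provided by the
 * companion *_d.h offset header, which is not shown here) are taken as
 * parameters so the sketch stays self-contained.
 */
#include <stdint.h>

uint32_t smu_read32(uint32_t reg);            /* assumed MMIO read helper  */
void smu_write32(uint32_t reg, uint32_t val); /* assumed MMIO write helper */

/* Extract a bitfield described by a _MASK/_SHIFT pair. */
static inline uint32_t smu_get_field(uint32_t reg_val, uint32_t mask, uint32_t shift)
{
	return (reg_val & mask) >> shift;
}

/* Replace a bitfield, leaving the remaining bits of the register untouched. */
static inline uint32_t smu_set_field(uint32_t reg_val, uint32_t mask, uint32_t shift,
				     uint32_t field_val)
{
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

/*
 * Message handshake with the SMC mailbox registers defined above: post an
 * argument and a message ID, then poll the response register until the
 * firmware writes a non-zero response code.  A real driver would bound the
 * polling loop with a timeout.
 */
static inline uint32_t smu_send_msg_sketch(uint32_t msg_reg, uint32_t resp_reg,
					   uint32_t arg_reg, uint16_t msg, uint32_t arg)
{
	smu_write32(arg_reg, arg);
	smu_write32(msg_reg, msg & SMC_MESSAGE_0__SMC_MSG_MASK);
	while (smu_get_field(smu_read32(resp_reg), SMC_RESP_0__SMC_RESP_MASK,
			     SMC_RESP_0__SMC_RESP__SHIFT) == 0)
		;
	return smu_read32(resp_reg) & SMC_RESP_0__SMC_RESP_MASK;
}
#define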
SMC_MESSAGE_8__SMC_MSG_MASK 0xffff #define SMC_MESSAGE_8__SMC_MSG__SHIFT 0x0 #define SMC_RESP_8__SMC_RESP_MASK 0xffff #define SMC_RESP_8__SMC_RESP__SHIFT 0x0 #define SMC_MESSAGE_9__SMC_MSG_MASK 0xffff #define SMC_MESSAGE_9__SMC_MSG__SHIFT 0x0 #define SMC_RESP_9__SMC_RESP_MASK 0xffff #define SMC_RESP_9__SMC_RESP__SHIFT 0x0 #define SMC_MESSAGE_10__SMC_MSG_MASK 0xffff #define SMC_MESSAGE_10__SMC_MSG__SHIFT 0x0 #define SMC_RESP_10__SMC_RESP_MASK 0xffff #define SMC_RESP_10__SMC_RESP__SHIFT 0x0 #define SMC_MESSAGE_11__SMC_MSG_MASK 0xffff #define SMC_MESSAGE_11__SMC_MSG__SHIFT 0x0 #define SMC_RESP_11__SMC_RESP_MASK 0xffff #define SMC_RESP_11__SMC_RESP__SHIFT 0x0 #define SMC_MSG_ARG_8__SMC_MSG_ARG_MASK 0xffffffff #define SMC_MSG_ARG_8__SMC_MSG_ARG__SHIFT 0x0 #define SMC_MSG_ARG_9__SMC_MSG_ARG_MASK 0xffffffff #define SMC_MSG_ARG_9__SMC_MSG_ARG__SHIFT 0x0 #define SMC_MSG_ARG_10__SMC_MSG_ARG_MASK 0xffffffff #define SMC_MSG_ARG_10__SMC_MSG_ARG__SHIFT 0x0 #define SMC_MSG_ARG_11__SMC_MSG_ARG_MASK 0xffffffff #define SMC_MSG_ARG_11__SMC_MSG_ARG__SHIFT 0x0 #define SMC_SYSCON_RESET_CNTL__rst_reg_MASK 0x1 #define SMC_SYSCON_RESET_CNTL__rst_reg__SHIFT 0x0 #define SMC_SYSCON_RESET_CNTL__srbm_soft_rst_override_MASK 0x2 #define SMC_SYSCON_RESET_CNTL__srbm_soft_rst_override__SHIFT 0x1 #define SMC_SYSCON_RESET_CNTL__RegReset_MASK 0x40000000 #define SMC_SYSCON_RESET_CNTL__RegReset__SHIFT 0x1e #define SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK 0x1 #define SMC_SYSCON_CLOCK_CNTL_0__ck_disable__SHIFT 0x0 #define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_en_MASK 0x2 #define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_en__SHIFT 0x1 #define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_timeout_MASK 0xffff00 #define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_timeout__SHIFT 0x8 #define SMC_SYSCON_CLOCK_CNTL_0__cken_MASK 0x1000000 #define SMC_SYSCON_CLOCK_CNTL_0__cken__SHIFT 0x18 #define SMC_SYSCON_CLOCK_CNTL_1__auto_ck_disable_MASK 0x1 #define SMC_SYSCON_CLOCK_CNTL_1__auto_ck_disable__SHIFT 0x0 #define SMC_SYSCON_CLOCK_CNTL_2__wake_on_irq_MASK 0xffffffff #define SMC_SYSCON_CLOCK_CNTL_2__wake_on_irq__SHIFT 0x0 #define SMC_SYSCON_MSG_ARG_0__smc_msg_arg_MASK 0xffffffff #define SMC_SYSCON_MSG_ARG_0__smc_msg_arg__SHIFT 0x0 #define SMC_PC_C__smc_pc_c_MASK 0xffffffff #define SMC_PC_C__smc_pc_c__SHIFT 0x0 #define SMC_SCRATCH9__SCRATCH_VALUE_MASK 0xffffffff #define SMC_SCRATCH9__SCRATCH_VALUE__SHIFT 0x0 #define CG_FPS_CNT__FPS_CNT_MASK 0xff #define CG_FPS_CNT__FPS_CNT__SHIFT 0x0 #define SMU_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff #define SMU_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0 #define SMU_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff #define SMU_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0 #define RCU_UC_EVENTS__RCU_TST_jpc_rep_req_MASK 0x1 #define RCU_UC_EVENTS__RCU_TST_jpc_rep_req__SHIFT 0x0 #define RCU_UC_EVENTS__TST_RCU_jpc_rep_done_MASK 0x2 #define RCU_UC_EVENTS__TST_RCU_jpc_rep_done__SHIFT 0x1 #define RCU_UC_EVENTS__drv_rst_mode_MASK 0x4 #define RCU_UC_EVENTS__drv_rst_mode__SHIFT 0x2 #define RCU_UC_EVENTS__TP_Tester_MASK 0x40 #define RCU_UC_EVENTS__TP_Tester__SHIFT 0x6 #define RCU_UC_EVENTS__boot_seq_done_MASK 0x80 #define RCU_UC_EVENTS__boot_seq_done__SHIFT 0x7 #define RCU_UC_EVENTS__sclk_deep_sleep_exit_MASK 0x100 #define RCU_UC_EVENTS__sclk_deep_sleep_exit__SHIFT 0x8 #define RCU_UC_EVENTS__BREAK_PT1_ACTIVE_MASK 0x200 #define RCU_UC_EVENTS__BREAK_PT1_ACTIVE__SHIFT 0x9 #define RCU_UC_EVENTS__BREAK_PT2_ACTIVE_MASK 0x400 #define RCU_UC_EVENTS__BREAK_PT2_ACTIVE__SHIFT 0xa #define RCU_UC_EVENTS__FCH_HALT_MASK 0x800 #define RCU_UC_EVENTS__FCH_HALT__SHIFT 0xb #define 
RCU_UC_EVENTS__RCU_GIO_fch_lockdown_MASK 0x2000 #define RCU_UC_EVENTS__RCU_GIO_fch_lockdown__SHIFT 0xd #define RCU_UC_EVENTS__INTERRUPTS_ENABLED_MASK 0x10000 #define RCU_UC_EVENTS__INTERRUPTS_ENABLED__SHIFT 0x10 #define RCU_UC_EVENTS__RCU_DtmCnt0_Done_MASK 0x20000 #define RCU_UC_EVENTS__RCU_DtmCnt0_Done__SHIFT 0x11 #define RCU_UC_EVENTS__RCU_DtmCnt1_Done_MASK 0x40000 #define RCU_UC_EVENTS__RCU_DtmCnt1_Done__SHIFT 0x12 #define RCU_UC_EVENTS__RCU_DtmCnt2_Done_MASK 0x80000 #define RCU_UC_EVENTS__RCU_DtmCnt2_Done__SHIFT 0x13 #define RCU_UC_EVENTS__irq31_sel_MASK 0x3000000 #define RCU_UC_EVENTS__irq31_sel__SHIFT 0x18 #define RCU_MISC_CTRL__REG_DRV_RST_MODE_MASK 0x2 #define RCU_MISC_CTRL__REG_DRV_RST_MODE__SHIFT 0x1 #define RCU_MISC_CTRL__REG_RCU_MEMREP_DIS_MASK 0x8 #define RCU_MISC_CTRL__REG_RCU_MEMREP_DIS__SHIFT 0x3 #define RCU_MISC_CTRL__REG_CC_FUSE_DISABLE_MASK 0x10 #define RCU_MISC_CTRL__REG_CC_FUSE_DISABLE__SHIFT 0x4 #define RCU_MISC_CTRL__REG_SAMU_FUSE_DISABLE_MASK 0x20 #define RCU_MISC_CTRL__REG_SAMU_FUSE_DISABLE__SHIFT 0x5 #define RCU_MISC_CTRL__REG_CC_SRBM_RD_DISABLE_MASK 0x100 #define RCU_MISC_CTRL__REG_CC_SRBM_RD_DISABLE__SHIFT 0x8 #define RCU_MISC_CTRL__BREAK_PT1_DONE_MASK 0x10000 #define RCU_MISC_CTRL__BREAK_PT1_DONE__SHIFT 0x10 #define RCU_MISC_CTRL__BREAK_PT2_DONE_MASK 0x20000 #define RCU_MISC_CTRL__BREAK_PT2_DONE__SHIFT 0x11 #define RCU_MISC_CTRL__SAMU_START_MASK 0x400000 #define RCU_MISC_CTRL__SAMU_START__SHIFT 0x16 #define RCU_MISC_CTRL__RST_PULSE_WIDTH_MASK 0xff800000 #define RCU_MISC_CTRL__RST_PULSE_WIDTH__SHIFT 0x17 #define CC_RCU_FUSES__GPU_DIS_MASK 0x2 #define CC_RCU_FUSES__GPU_DIS__SHIFT 0x1 #define CC_RCU_FUSES__DEBUG_DISABLE_MASK 0x4 #define CC_RCU_FUSES__DEBUG_DISABLE__SHIFT 0x2 #define CC_RCU_FUSES__EFUSE_RD_DISABLE_MASK 0x10 #define CC_RCU_FUSES__EFUSE_RD_DISABLE__SHIFT 0x4 #define CC_RCU_FUSES__CG_RST_GLB_REQ_DIS_MASK 0x20 #define CC_RCU_FUSES__CG_RST_GLB_REQ_DIS__SHIFT 0x5 #define CC_RCU_FUSES__DRV_RST_MODE_MASK 0x40 #define CC_RCU_FUSES__DRV_RST_MODE__SHIFT 0x6 #define CC_RCU_FUSES__ROM_DIS_MASK 0x80 #define CC_RCU_FUSES__ROM_DIS__SHIFT 0x7 #define CC_RCU_FUSES__JPC_REP_DISABLE_MASK 0x100 #define CC_RCU_FUSES__JPC_REP_DISABLE__SHIFT 0x8 #define CC_RCU_FUSES__RCU_BREAK_POINT1_MASK 0x200 #define CC_RCU_FUSES__RCU_BREAK_POINT1__SHIFT 0x9 #define CC_RCU_FUSES__RCU_BREAK_POINT2_MASK 0x400 #define CC_RCU_FUSES__RCU_BREAK_POINT2__SHIFT 0xa #define CC_RCU_FUSES__PHY_FUSE_VALID_MASK 0x4000 #define CC_RCU_FUSES__PHY_FUSE_VALID__SHIFT 0xe #define CC_RCU_FUSES__SMU_IOC_MST_DISABLE_MASK 0x8000 #define CC_RCU_FUSES__SMU_IOC_MST_DISABLE__SHIFT 0xf #define CC_RCU_FUSES__FCH_LOCKOUT_ENABLE_MASK 0x10000 #define CC_RCU_FUSES__FCH_LOCKOUT_ENABLE__SHIFT 0x10 #define CC_RCU_FUSES__FCH_XFIRE_FILTER_ENABLE_MASK 0x20000 #define CC_RCU_FUSES__FCH_XFIRE_FILTER_ENABLE__SHIFT 0x11 #define CC_RCU_FUSES__XFIRE_DISABLE_MASK 0x40000 #define CC_RCU_FUSES__XFIRE_DISABLE__SHIFT 0x12 #define CC_RCU_FUSES__SAMU_FUSE_DISABLE_MASK 0x80000 #define CC_RCU_FUSES__SAMU_FUSE_DISABLE__SHIFT 0x13 #define CC_RCU_FUSES__BIF_RST_POLLING_DISABLE_MASK 0x100000 #define CC_RCU_FUSES__BIF_RST_POLLING_DISABLE__SHIFT 0x14 #define CC_RCU_FUSES__MEM_HARDREP_EN_MASK 0x400000 #define CC_RCU_FUSES__MEM_HARDREP_EN__SHIFT 0x16 #define CC_RCU_FUSES__PCIE_INIT_DISABLE_MASK 0x800000 #define CC_RCU_FUSES__PCIE_INIT_DISABLE__SHIFT 0x17 #define CC_RCU_FUSES__DSMU_DISABLE_MASK 0x1000000 #define CC_RCU_FUSES__DSMU_DISABLE__SHIFT 0x18 #define CC_RCU_FUSES__RCU_SPARE_MASK 0x7e000000 #define CC_RCU_FUSES__RCU_SPARE__SHIFT 0x19 #define 
CC_RCU_FUSES__PSP_ENABLE_MASK 0x80000000 #define CC_RCU_FUSES__PSP_ENABLE__SHIFT 0x1f #define CC_SMU_MISC_FUSES__IOMMU_V2_DISABLE_MASK 0x2 #define CC_SMU_MISC_FUSES__IOMMU_V2_DISABLE__SHIFT 0x1 #define CC_SMU_MISC_FUSES__MinSClkDid_MASK 0x1fc #define CC_SMU_MISC_FUSES__MinSClkDid__SHIFT 0x2 #define CC_SMU_MISC_FUSES__MISC_SPARE_MASK 0x600 #define CC_SMU_MISC_FUSES__MISC_SPARE__SHIFT 0x9 #define CC_SMU_MISC_FUSES__PostResetGnbClkDid_MASK 0x3f800 #define CC_SMU_MISC_FUSES__PostResetGnbClkDid__SHIFT 0xb #define CC_SMU_MISC_FUSES__L2IMU_tn2_dtc_half_MASK 0x40000 #define CC_SMU_MISC_FUSES__L2IMU_tn2_dtc_half__SHIFT 0x12 #define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_half_MASK 0x80000 #define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_half__SHIFT 0x13 #define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_half_MASK 0x100000 #define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_half__SHIFT 0x14 #define CC_SMU_MISC_FUSES__L2IMU_tn2_pdc_half_MASK 0x200000 #define CC_SMU_MISC_FUSES__L2IMU_tn2_pdc_half__SHIFT 0x15 #define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_dis_MASK 0x400000 #define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_dis__SHIFT 0x16 #define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_dis_MASK 0x800000 #define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_dis__SHIFT 0x17 #define CC_SMU_MISC_FUSES__VCE_DISABLE_MASK 0x8000000 #define CC_SMU_MISC_FUSES__VCE_DISABLE__SHIFT 0x1b #define CC_SMU_MISC_FUSES__IOC_IOMMU_DISABLE_MASK 0x10000000 #define CC_SMU_MISC_FUSES__IOC_IOMMU_DISABLE__SHIFT 0x1c #define CC_SMU_MISC_FUSES__GNB_SPARE_MASK 0x60000000 #define CC_SMU_MISC_FUSES__GNB_SPARE__SHIFT 0x1d #define CC_SCLK_VID_FUSES__SClkVid0_MASK 0xff #define CC_SCLK_VID_FUSES__SClkVid0__SHIFT 0x0 #define CC_SCLK_VID_FUSES__SClkVid1_MASK 0xff00 #define CC_SCLK_VID_FUSES__SClkVid1__SHIFT 0x8 #define CC_SCLK_VID_FUSES__SClkVid2_MASK 0xff0000 #define CC_SCLK_VID_FUSES__SClkVid2__SHIFT 0x10 #define CC_SCLK_VID_FUSES__SClkVid3_MASK 0xff000000 #define CC_SCLK_VID_FUSES__SClkVid3__SHIFT 0x18 #define CC_GIO_IOCCFG_FUSES__NB_REV_ID_MASK 0x7fe #define CC_GIO_IOCCFG_FUSES__NB_REV_ID__SHIFT 0x1 #define CC_GIO_IOC_FUSES__IOC_FUSES_MASK 0x3fffe #define CC_GIO_IOC_FUSES__IOC_FUSES__SHIFT 0x1 #define CC_SMU_TST_EFUSE1_MISC__RF_RM_6_2_MASK 0x3e #define CC_SMU_TST_EFUSE1_MISC__RF_RM_6_2__SHIFT 0x1 #define CC_SMU_TST_EFUSE1_MISC__RME_MASK 0x40 #define CC_SMU_TST_EFUSE1_MISC__RME__SHIFT 0x6 #define CC_SMU_TST_EFUSE1_MISC__MBIST_DISABLE_MASK 0x80 #define CC_SMU_TST_EFUSE1_MISC__MBIST_DISABLE__SHIFT 0x7 #define CC_SMU_TST_EFUSE1_MISC__HARD_REPAIR_DISABLE_MASK 0x100 #define CC_SMU_TST_EFUSE1_MISC__HARD_REPAIR_DISABLE__SHIFT 0x8 #define CC_SMU_TST_EFUSE1_MISC__SOFT_REPAIR_DISABLE_MASK 0x200 #define CC_SMU_TST_EFUSE1_MISC__SOFT_REPAIR_DISABLE__SHIFT 0x9 #define CC_SMU_TST_EFUSE1_MISC__GPU_DIS_MASK 0x400 #define CC_SMU_TST_EFUSE1_MISC__GPU_DIS__SHIFT 0xa #define CC_SMU_TST_EFUSE1_MISC__SMS_PWRDWN_DISABLE_MASK 0x800 #define CC_SMU_TST_EFUSE1_MISC__SMS_PWRDWN_DISABLE__SHIFT 0xb #define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISA_MASK 0x1000 #define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISA__SHIFT 0xc #define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISB_MASK 0x2000 #define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISB__SHIFT 0xd #define CC_SMU_TST_EFUSE1_MISC__RM_RF8_MASK 0x4000 #define CC_SMU_TST_EFUSE1_MISC__RM_RF8__SHIFT 0xe #define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE1_MASK 0x400000 #define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE1__SHIFT 0x16 #define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE2_MASK 0x800000 #define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE2__SHIFT 0x17 #define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE3_MASK 0x1000000 #define 
CC_SMU_TST_EFUSE1_MISC__DFT_SPARE3__SHIFT 0x18 #define CC_SMU_TST_EFUSE1_MISC__VCE_DISABLE_MASK 0x2000000 #define CC_SMU_TST_EFUSE1_MISC__VCE_DISABLE__SHIFT 0x19 #define CC_SMU_TST_EFUSE1_MISC__DCE_SCAN_DISABLE_MASK 0x4000000 #define CC_SMU_TST_EFUSE1_MISC__DCE_SCAN_DISABLE__SHIFT 0x1a #define CC_TST_ID_STRAPS__DEVICE_ID_MASK 0xffff0 #define CC_TST_ID_STRAPS__DEVICE_ID__SHIFT 0x4 #define CC_TST_ID_STRAPS__MAJOR_REV_ID_MASK 0xf00000 #define CC_TST_ID_STRAPS__MAJOR_REV_ID__SHIFT 0x14 #define CC_TST_ID_STRAPS__MINOR_REV_ID_MASK 0xf000000 #define CC_TST_ID_STRAPS__MINOR_REV_ID__SHIFT 0x18 #define CC_FCTRL_FUSES__EXT_EFUSE_MACRO_PRESENT_MASK 0x2 #define CC_FCTRL_FUSES__EXT_EFUSE_MACRO_PRESENT__SHIFT 0x1 #define SMU_MAIN_PLL_OP_FREQ__PLL_OP_FREQ_MASK 0xffffffff #define SMU_MAIN_PLL_OP_FREQ__PLL_OP_FREQ__SHIFT 0x0 #define SMU_STATUS__SMU_DONE_MASK 0x1 #define SMU_STATUS__SMU_DONE__SHIFT 0x0 #define SMU_STATUS__SMU_PASS_MASK 0x2 #define SMU_STATUS__SMU_PASS__SHIFT 0x1 #define SMU_FIRMWARE__SMU_IN_PROG_MASK 0x1 #define SMU_FIRMWARE__SMU_IN_PROG__SHIFT 0x0 #define SMU_FIRMWARE__SMU_RD_DONE_MASK 0x6 #define SMU_FIRMWARE__SMU_RD_DONE__SHIFT 0x1 #define SMU_FIRMWARE__SMU_SRAM_RD_BLOCK_EN_MASK 0x8 #define SMU_FIRMWARE__SMU_SRAM_RD_BLOCK_EN__SHIFT 0x3 #define SMU_FIRMWARE__SMU_SRAM_WR_BLOCK_EN_MASK 0x10 #define SMU_FIRMWARE__SMU_SRAM_WR_BLOCK_EN__SHIFT 0x4 #define SMU_FIRMWARE__SMU_counter_MASK 0xf00 #define SMU_FIRMWARE__SMU_counter__SHIFT 0x8 #define SMU_FIRMWARE__SMU_MODE_MASK 0x10000 #define SMU_FIRMWARE__SMU_MODE__SHIFT 0x10 #define SMU_FIRMWARE__SMU_SEL_MASK 0x20000 #define SMU_FIRMWARE__SMU_SEL__SHIFT 0x11 #define SMU_INPUT_DATA__START_ADDR_MASK 0x7fffffff #define SMU_INPUT_DATA__START_ADDR__SHIFT 0x0 #define SMU_INPUT_DATA__AUTO_START_MASK 0x80000000 #define SMU_INPUT_DATA__AUTO_START__SHIFT 0x1f #define SMU_EFUSE_0__EFUSE_DATA_MASK 0xffffffff #define SMU_EFUSE_0__EFUSE_DATA__SHIFT 0x0 #define DPM_TABLE_1__SystemFlags_MASK 0xffffffff #define DPM_TABLE_1__SystemFlags__SHIFT 0x0 #define DPM_TABLE_2__GraphicsPIDController_Ki_MASK 0xffffffff #define DPM_TABLE_2__GraphicsPIDController_Ki__SHIFT 0x0 #define DPM_TABLE_3__GraphicsPIDController_LFWindupUpperLim_MASK 0xffffffff #define DPM_TABLE_3__GraphicsPIDController_LFWindupUpperLim__SHIFT 0x0 #define DPM_TABLE_4__GraphicsPIDController_LFWindupLowerLim_MASK 0xffffffff #define DPM_TABLE_4__GraphicsPIDController_LFWindupLowerLim__SHIFT 0x0 #define DPM_TABLE_5__GraphicsPIDController_StatePrecision_MASK 0xffffffff #define DPM_TABLE_5__GraphicsPIDController_StatePrecision__SHIFT 0x0 #define DPM_TABLE_6__GraphicsPIDController_LfPrecision_MASK 0xffffffff #define DPM_TABLE_6__GraphicsPIDController_LfPrecision__SHIFT 0x0 #define DPM_TABLE_7__GraphicsPIDController_LfOffset_MASK 0xffffffff #define DPM_TABLE_7__GraphicsPIDController_LfOffset__SHIFT 0x0 #define DPM_TABLE_8__GraphicsPIDController_MaxState_MASK 0xffffffff #define DPM_TABLE_8__GraphicsPIDController_MaxState__SHIFT 0x0 #define DPM_TABLE_9__GraphicsPIDController_MaxLfFraction_MASK 0xffffffff #define DPM_TABLE_9__GraphicsPIDController_MaxLfFraction__SHIFT 0x0 #define DPM_TABLE_10__GraphicsPIDController_StateShift_MASK 0xffffffff #define DPM_TABLE_10__GraphicsPIDController_StateShift__SHIFT 0x0 #define DPM_TABLE_11__GioPIDController_Ki_MASK 0xffffffff #define DPM_TABLE_11__GioPIDController_Ki__SHIFT 0x0 #define DPM_TABLE_12__GioPIDController_LFWindupUpperLim_MASK 0xffffffff #define DPM_TABLE_12__GioPIDController_LFWindupUpperLim__SHIFT 0x0 #define DPM_TABLE_13__GioPIDController_LFWindupLowerLim_MASK 
0xffffffff
#define DPM_TABLE_13__GioPIDController_LFWindupLowerLim__SHIFT 0x0
#define DPM_TABLE_14__GioPIDController_StatePrecision_MASK 0xffffffff
#define DPM_TABLE_14__GioPIDController_StatePrecision__SHIFT 0x0
#define DPM_TABLE_15__GioPIDController_LfPrecision_MASK 0xffffffff
#define DPM_TABLE_15__GioPIDController_LfPrecision__SHIFT 0x0
#define DPM_TABLE_16__GioPIDController_LfOffset_MASK 0xffffffff
#define DPM_TABLE_16__GioPIDController_LfOffset__SHIFT 0x0
#define DPM_TABLE_17__GioPIDController_MaxState_MASK 0xffffffff
#define DPM_TABLE_17__GioPIDController_MaxState__SHIFT 0x0
#define DPM_TABLE_18__GioPIDController_MaxLfFraction_MASK 0xffffffff
#define DPM_TABLE_18__GioPIDController_MaxLfFraction__SHIFT 0x0
#define DPM_TABLE_19__GioPIDController_StateShift_MASK 0xffffffff
#define DPM_TABLE_19__GioPIDController_StateShift__SHIFT 0x0
#define DPM_TABLE_20__VceLevelCount_MASK 0xff
#define DPM_TABLE_20__VceLevelCount__SHIFT 0x0
#define DPM_TABLE_20__UvdLevelCount_MASK 0xff00
#define DPM_TABLE_20__UvdLevelCount__SHIFT 0x8
#define DPM_TABLE_20__GIOLevelCount_MASK 0xff0000
#define DPM_TABLE_20__GIOLevelCount__SHIFT 0x10
#define DPM_TABLE_20__GraphicsDpmLevelCount_MASK 0xff000000
#define DPM_TABLE_20__GraphicsDpmLevelCount__SHIFT 0x18
#define DPM_TABLE_21__FpsHighThreshold_MASK 0xffff
#define DPM_TABLE_21__FpsHighThreshold__SHIFT 0x0
#define DPM_TABLE_21__SamuLevelCount_MASK 0xff0000
#define DPM_TABLE_21__SamuLevelCount__SHIFT 0x10
#define DPM_TABLE_21__AcpLevelCount_MASK 0xff000000
#define DPM_TABLE_21__AcpLevelCount__SHIFT 0x18
#define DPM_TABLE_22__GraphicsLevel_0_MinVddNb_MASK 0xffffffff
#define DPM_TABLE_22__GraphicsLevel_0_MinVddNb__SHIFT 0x0
#define DPM_TABLE_23__GraphicsLevel_0_SclkFrequency_MASK 0xffffffff
#define DPM_TABLE_23__GraphicsLevel_0_SclkFrequency__SHIFT 0x0
#define DPM_TABLE_24__GraphicsLevel_0_ActivityLevel_MASK 0xffff
#define DPM_TABLE_24__GraphicsLevel_0_ActivityLevel__SHIFT 0x0
#define DPM_TABLE_24__GraphicsLevel_0_VidOffset_MASK 0xff0000
#define DPM_TABLE_24__GraphicsLevel_0_VidOffset__SHIFT 0x10
#define DPM_TABLE_24__GraphicsLevel_0_Vid_MASK 0xff000000
#define DPM_TABLE_24__GraphicsLevel_0_Vid__SHIFT 0x18
#define DPM_TABLE_25__GraphicsLevel_0_SclkDid_MASK 0xff
#define DPM_TABLE_25__GraphicsLevel_0_SclkDid__SHIFT 0x0
#define DPM_TABLE_25__GraphicsLevel_0_ForceNbPs1_MASK 0xff00
#define DPM_TABLE_25__GraphicsLevel_0_ForceNbPs1__SHIFT 0x8
#define DPM_TABLE_25__GraphicsLevel_0_GnbSlow_MASK 0xff0000
#define DPM_TABLE_25__GraphicsLevel_0_GnbSlow__SHIFT 0x10
#define DPM_TABLE_25__GraphicsLevel_0_PowerThrottle_MASK 0xff000000
#define DPM_TABLE_25__GraphicsLevel_0_PowerThrottle__SHIFT 0x18
#define DPM_TABLE_26__GraphicsLevel_0_UpHyst_MASK 0xff
#define DPM_TABLE_26__GraphicsLevel_0_UpHyst__SHIFT 0x0
#define DPM_TABLE_26__GraphicsLevel_0_EnabledForThrottle_MASK 0xff00
#define DPM_TABLE_26__GraphicsLevel_0_EnabledForThrottle__SHIFT 0x8
#define DPM_TABLE_26__GraphicsLevel_0_EnabledForActivity_MASK 0xff0000
#define DPM_TABLE_26__GraphicsLevel_0_EnabledForActivity__SHIFT 0x10
#define DPM_TABLE_26__GraphicsLevel_0_DisplayWatermark_MASK 0xff000000
#define DPM_TABLE_26__GraphicsLevel_0_DisplayWatermark__SHIFT 0x18
#define DPM_TABLE_27__GraphicsLevel_0_ClkBypassCntl_MASK 0xff
#define DPM_TABLE_27__GraphicsLevel_0_ClkBypassCntl__SHIFT 0x0
#define DPM_TABLE_27__GraphicsLevel_0_DeepSleepDivId_MASK 0xff00
#define DPM_TABLE_27__GraphicsLevel_0_DeepSleepDivId__SHIFT 0x8
#define DPM_TABLE_27__GraphicsLevel_0_VoltageDownHyst_MASK 0xff0000
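/*
 * Editor's note -- illustrative sketch only.  The DPM_TABLE_22..DPM_TABLE_28
 * dwords in this region describe one packed graphics DPM level
 * (GraphicsLevel_0); the masks select the byte- and halfword-sized members
 * of each 32-bit table entry.  The helper below unpacks the DPM_TABLE_24
 * dword using the smu_get_field() helper and <stdint.h> types from the
 * earlier sketch; how the raw dword is fetched (for example through the SMC
 * indirect interface) is outside this sketch.
 */
struct graphics_level0_word24 {
	uint16_t activity_level;
	uint8_t vid_offset;
	uint8_t vid;
};

static inline struct graphics_level0_word24 dpm_table_24_unpack_sketch(uint32_t dword)
{
	struct graphics_level0_word24 out;

	out.activity_level = smu_get_field(dword,
			DPM_TABLE_24__GraphicsLevel_0_ActivityLevel_MASK,
			DPM_TABLE_24__GraphicsLevel_0_ActivityLevel__SHIFT);
	out.vid_offset = smu_get_field(dword,
			DPM_TABLE_24__GraphicsLevel_0_VidOffset_MASK,
			DPM_TABLE_24__GraphicsLevel_0_VidOffset__SHIFT);
	out.vid = smu_get_field(dword,
			DPM_TABLE_24__GraphicsLevel_0_Vid_MASK,
			DPM_TABLE_24__GraphicsLevel_0_Vid__SHIFT);
	return out;
}
#define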
DPM_TABLE_27__GraphicsLevel_0_VoltageDownHyst__SHIFT 0x10 #define DPM_TABLE_27__GraphicsLevel_0_DownHyst_MASK 0xff000000 #define DPM_TABLE_27__GraphicsLevel_0_DownHyst__SHIFT 0x18 #define DPM_TABLE_28__GraphicsLevel_0_reserved_MASK 0xffffffff #define DPM_TABLE_28__GraphicsLevel_0_reserved__SHIFT 0x0 #define DPM_TABLE_29__GraphicsLevel_1_MinVddNb_MASK 0xffffffff #define DPM_TABLE_29__GraphicsLevel_1_MinVddNb__SHIFT 0x0 #define DPM_TABLE_30__GraphicsLevel_1_SclkFrequency_MASK 0xffffffff #define DPM_TABLE_30__GraphicsLevel_1_SclkFrequency__SHIFT 0x0 #define DPM_TABLE_31__GraphicsLevel_1_ActivityLevel_MASK 0xffff #define DPM_TABLE_31__GraphicsLevel_1_ActivityLevel__SHIFT 0x0 #define DPM_TABLE_31__GraphicsLevel_1_VidOffset_MASK 0xff0000 #define DPM_TABLE_31__GraphicsLevel_1_VidOffset__SHIFT 0x10 #define DPM_TABLE_31__GraphicsLevel_1_Vid_MASK 0xff000000 #define DPM_TABLE_31__GraphicsLevel_1_Vid__SHIFT 0x18 #define DPM_TABLE_32__GraphicsLevel_1_SclkDid_MASK 0xff #define DPM_TABLE_32__GraphicsLevel_1_SclkDid__SHIFT 0x0 #define DPM_TABLE_32__GraphicsLevel_1_ForceNbPs1_MASK 0xff00 #define DPM_TABLE_32__GraphicsLevel_1_ForceNbPs1__SHIFT 0x8 #define DPM_TABLE_32__GraphicsLevel_1_GnbSlow_MASK 0xff0000 #define DPM_TABLE_32__GraphicsLevel_1_GnbSlow__SHIFT 0x10 #define DPM_TABLE_32__GraphicsLevel_1_PowerThrottle_MASK 0xff000000 #define DPM_TABLE_32__GraphicsLevel_1_PowerThrottle__SHIFT 0x18 #define DPM_TABLE_33__GraphicsLevel_1_UpHyst_MASK 0xff #define DPM_TABLE_33__GraphicsLevel_1_UpHyst__SHIFT 0x0 #define DPM_TABLE_33__GraphicsLevel_1_EnabledForThrottle_MASK 0xff00 #define DPM_TABLE_33__GraphicsLevel_1_EnabledForThrottle__SHIFT 0x8 #define DPM_TABLE_33__GraphicsLevel_1_EnabledForActivity_MASK 0xff0000 #define DPM_TABLE_33__GraphicsLevel_1_EnabledForActivity__SHIFT 0x10 #define DPM_TABLE_33__GraphicsLevel_1_DisplayWatermark_MASK 0xff000000 #define DPM_TABLE_33__GraphicsLevel_1_DisplayWatermark__SHIFT 0x18 #define DPM_TABLE_34__GraphicsLevel_1_ClkBypassCntl_MASK 0xff #define DPM_TABLE_34__GraphicsLevel_1_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_34__GraphicsLevel_1_DeepSleepDivId_MASK 0xff00 #define DPM_TABLE_34__GraphicsLevel_1_DeepSleepDivId__SHIFT 0x8 #define DPM_TABLE_34__GraphicsLevel_1_VoltageDownHyst_MASK 0xff0000 #define DPM_TABLE_34__GraphicsLevel_1_VoltageDownHyst__SHIFT 0x10 #define DPM_TABLE_34__GraphicsLevel_1_DownHyst_MASK 0xff000000 #define DPM_TABLE_34__GraphicsLevel_1_DownHyst__SHIFT 0x18 #define DPM_TABLE_35__GraphicsLevel_1_reserved_MASK 0xffffffff #define DPM_TABLE_35__GraphicsLevel_1_reserved__SHIFT 0x0 #define DPM_TABLE_36__GraphicsLevel_2_MinVddNb_MASK 0xffffffff #define DPM_TABLE_36__GraphicsLevel_2_MinVddNb__SHIFT 0x0 #define DPM_TABLE_37__GraphicsLevel_2_SclkFrequency_MASK 0xffffffff #define DPM_TABLE_37__GraphicsLevel_2_SclkFrequency__SHIFT 0x0 #define DPM_TABLE_38__GraphicsLevel_2_ActivityLevel_MASK 0xffff #define DPM_TABLE_38__GraphicsLevel_2_ActivityLevel__SHIFT 0x0 #define DPM_TABLE_38__GraphicsLevel_2_VidOffset_MASK 0xff0000 #define DPM_TABLE_38__GraphicsLevel_2_VidOffset__SHIFT 0x10 #define DPM_TABLE_38__GraphicsLevel_2_Vid_MASK 0xff000000 #define DPM_TABLE_38__GraphicsLevel_2_Vid__SHIFT 0x18 #define DPM_TABLE_39__GraphicsLevel_2_SclkDid_MASK 0xff #define DPM_TABLE_39__GraphicsLevel_2_SclkDid__SHIFT 0x0 #define DPM_TABLE_39__GraphicsLevel_2_ForceNbPs1_MASK 0xff00 #define DPM_TABLE_39__GraphicsLevel_2_ForceNbPs1__SHIFT 0x8 #define DPM_TABLE_39__GraphicsLevel_2_GnbSlow_MASK 0xff0000 #define DPM_TABLE_39__GraphicsLevel_2_GnbSlow__SHIFT 0x10 #define 
DPM_TABLE_39__GraphicsLevel_2_PowerThrottle_MASK 0xff000000 #define DPM_TABLE_39__GraphicsLevel_2_PowerThrottle__SHIFT 0x18 #define DPM_TABLE_40__GraphicsLevel_2_UpHyst_MASK 0xff #define DPM_TABLE_40__GraphicsLevel_2_UpHyst__SHIFT 0x0 #define DPM_TABLE_40__GraphicsLevel_2_EnabledForThrottle_MASK 0xff00 #define DPM_TABLE_40__GraphicsLevel_2_EnabledForThrottle__SHIFT 0x8 #define DPM_TABLE_40__GraphicsLevel_2_EnabledForActivity_MASK 0xff0000 #define DPM_TABLE_40__GraphicsLevel_2_EnabledForActivity__SHIFT 0x10 #define DPM_TABLE_40__GraphicsLevel_2_DisplayWatermark_MASK 0xff000000 #define DPM_TABLE_40__GraphicsLevel_2_DisplayWatermark__SHIFT 0x18 #define DPM_TABLE_41__GraphicsLevel_2_ClkBypassCntl_MASK 0xff #define DPM_TABLE_41__GraphicsLevel_2_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_41__GraphicsLevel_2_DeepSleepDivId_MASK 0xff00 #define DPM_TABLE_41__GraphicsLevel_2_DeepSleepDivId__SHIFT 0x8 #define DPM_TABLE_41__GraphicsLevel_2_VoltageDownHyst_MASK 0xff0000 #define DPM_TABLE_41__GraphicsLevel_2_VoltageDownHyst__SHIFT 0x10 #define DPM_TABLE_41__GraphicsLevel_2_DownHyst_MASK 0xff000000 #define DPM_TABLE_41__GraphicsLevel_2_DownHyst__SHIFT 0x18 #define DPM_TABLE_42__GraphicsLevel_2_reserved_MASK 0xffffffff #define DPM_TABLE_42__GraphicsLevel_2_reserved__SHIFT 0x0 #define DPM_TABLE_43__GraphicsLevel_3_MinVddNb_MASK 0xffffffff #define DPM_TABLE_43__GraphicsLevel_3_MinVddNb__SHIFT 0x0 #define DPM_TABLE_44__GraphicsLevel_3_SclkFrequency_MASK 0xffffffff #define DPM_TABLE_44__GraphicsLevel_3_SclkFrequency__SHIFT 0x0 #define DPM_TABLE_45__GraphicsLevel_3_ActivityLevel_MASK 0xffff #define DPM_TABLE_45__GraphicsLevel_3_ActivityLevel__SHIFT 0x0 #define DPM_TABLE_45__GraphicsLevel_3_VidOffset_MASK 0xff0000 #define DPM_TABLE_45__GraphicsLevel_3_VidOffset__SHIFT 0x10 #define DPM_TABLE_45__GraphicsLevel_3_Vid_MASK 0xff000000 #define DPM_TABLE_45__GraphicsLevel_3_Vid__SHIFT 0x18 #define DPM_TABLE_46__GraphicsLevel_3_SclkDid_MASK 0xff #define DPM_TABLE_46__GraphicsLevel_3_SclkDid__SHIFT 0x0 #define DPM_TABLE_46__GraphicsLevel_3_ForceNbPs1_MASK 0xff00 #define DPM_TABLE_46__GraphicsLevel_3_ForceNbPs1__SHIFT 0x8 #define DPM_TABLE_46__GraphicsLevel_3_GnbSlow_MASK 0xff0000 #define DPM_TABLE_46__GraphicsLevel_3_GnbSlow__SHIFT 0x10 #define DPM_TABLE_46__GraphicsLevel_3_PowerThrottle_MASK 0xff000000 #define DPM_TABLE_46__GraphicsLevel_3_PowerThrottle__SHIFT 0x18 #define DPM_TABLE_47__GraphicsLevel_3_UpHyst_MASK 0xff #define DPM_TABLE_47__GraphicsLevel_3_UpHyst__SHIFT 0x0 #define DPM_TABLE_47__GraphicsLevel_3_EnabledForThrottle_MASK 0xff00 #define DPM_TABLE_47__GraphicsLevel_3_EnabledForThrottle__SHIFT 0x8 #define DPM_TABLE_47__GraphicsLevel_3_EnabledForActivity_MASK 0xff0000 #define DPM_TABLE_47__GraphicsLevel_3_EnabledForActivity__SHIFT 0x10 #define DPM_TABLE_47__GraphicsLevel_3_DisplayWatermark_MASK 0xff000000 #define DPM_TABLE_47__GraphicsLevel_3_DisplayWatermark__SHIFT 0x18 #define DPM_TABLE_48__GraphicsLevel_3_ClkBypassCntl_MASK 0xff #define DPM_TABLE_48__GraphicsLevel_3_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_48__GraphicsLevel_3_DeepSleepDivId_MASK 0xff00 #define DPM_TABLE_48__GraphicsLevel_3_DeepSleepDivId__SHIFT 0x8 #define DPM_TABLE_48__GraphicsLevel_3_VoltageDownHyst_MASK 0xff0000 #define DPM_TABLE_48__GraphicsLevel_3_VoltageDownHyst__SHIFT 0x10 #define DPM_TABLE_48__GraphicsLevel_3_DownHyst_MASK 0xff000000 #define DPM_TABLE_48__GraphicsLevel_3_DownHyst__SHIFT 0x18 #define DPM_TABLE_49__GraphicsLevel_3_reserved_MASK 0xffffffff #define DPM_TABLE_49__GraphicsLevel_3_reserved__SHIFT 0x0 #define 
DPM_TABLE_50__GraphicsLevel_4_MinVddNb_MASK 0xffffffff #define DPM_TABLE_50__GraphicsLevel_4_MinVddNb__SHIFT 0x0 #define DPM_TABLE_51__GraphicsLevel_4_SclkFrequency_MASK 0xffffffff #define DPM_TABLE_51__GraphicsLevel_4_SclkFrequency__SHIFT 0x0 #define DPM_TABLE_52__GraphicsLevel_4_ActivityLevel_MASK 0xffff #define DPM_TABLE_52__GraphicsLevel_4_ActivityLevel__SHIFT 0x0 #define DPM_TABLE_52__GraphicsLevel_4_VidOffset_MASK 0xff0000 #define DPM_TABLE_52__GraphicsLevel_4_VidOffset__SHIFT 0x10 #define DPM_TABLE_52__GraphicsLevel_4_Vid_MASK 0xff000000 #define DPM_TABLE_52__GraphicsLevel_4_Vid__SHIFT 0x18 #define DPM_TABLE_53__GraphicsLevel_4_SclkDid_MASK 0xff #define DPM_TABLE_53__GraphicsLevel_4_SclkDid__SHIFT 0x0 #define DPM_TABLE_53__GraphicsLevel_4_ForceNbPs1_MASK 0xff00 #define DPM_TABLE_53__GraphicsLevel_4_ForceNbPs1__SHIFT 0x8 #define DPM_TABLE_53__GraphicsLevel_4_GnbSlow_MASK 0xff0000 #define DPM_TABLE_53__GraphicsLevel_4_GnbSlow__SHIFT 0x10 #define DPM_TABLE_53__GraphicsLevel_4_PowerThrottle_MASK 0xff000000 #define DPM_TABLE_53__GraphicsLevel_4_PowerThrottle__SHIFT 0x18 #define DPM_TABLE_54__GraphicsLevel_4_UpHyst_MASK 0xff #define DPM_TABLE_54__GraphicsLevel_4_UpHyst__SHIFT 0x0 #define DPM_TABLE_54__GraphicsLevel_4_EnabledForThrottle_MASK 0xff00 #define DPM_TABLE_54__GraphicsLevel_4_EnabledForThrottle__SHIFT 0x8 #define DPM_TABLE_54__GraphicsLevel_4_EnabledForActivity_MASK 0xff0000 #define DPM_TABLE_54__GraphicsLevel_4_EnabledForActivity__SHIFT 0x10 #define DPM_TABLE_54__GraphicsLevel_4_DisplayWatermark_MASK 0xff000000 #define DPM_TABLE_54__GraphicsLevel_4_DisplayWatermark__SHIFT 0x18 #define DPM_TABLE_55__GraphicsLevel_4_ClkBypassCntl_MASK 0xff #define DPM_TABLE_55__GraphicsLevel_4_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_55__GraphicsLevel_4_DeepSleepDivId_MASK 0xff00 #define DPM_TABLE_55__GraphicsLevel_4_DeepSleepDivId__SHIFT 0x8 #define DPM_TABLE_55__GraphicsLevel_4_VoltageDownHyst_MASK 0xff0000 #define DPM_TABLE_55__GraphicsLevel_4_VoltageDownHyst__SHIFT 0x10 #define DPM_TABLE_55__GraphicsLevel_4_DownHyst_MASK 0xff000000 #define DPM_TABLE_55__GraphicsLevel_4_DownHyst__SHIFT 0x18 #define DPM_TABLE_56__GraphicsLevel_4_reserved_MASK 0xffffffff #define DPM_TABLE_56__GraphicsLevel_4_reserved__SHIFT 0x0 #define DPM_TABLE_57__GraphicsLevel_5_MinVddNb_MASK 0xffffffff #define DPM_TABLE_57__GraphicsLevel_5_MinVddNb__SHIFT 0x0 #define DPM_TABLE_58__GraphicsLevel_5_SclkFrequency_MASK 0xffffffff #define DPM_TABLE_58__GraphicsLevel_5_SclkFrequency__SHIFT 0x0 #define DPM_TABLE_59__GraphicsLevel_5_ActivityLevel_MASK 0xffff #define DPM_TABLE_59__GraphicsLevel_5_ActivityLevel__SHIFT 0x0 #define DPM_TABLE_59__GraphicsLevel_5_VidOffset_MASK 0xff0000 #define DPM_TABLE_59__GraphicsLevel_5_VidOffset__SHIFT 0x10 #define DPM_TABLE_59__GraphicsLevel_5_Vid_MASK 0xff000000 #define DPM_TABLE_59__GraphicsLevel_5_Vid__SHIFT 0x18 #define DPM_TABLE_60__GraphicsLevel_5_SclkDid_MASK 0xff #define DPM_TABLE_60__GraphicsLevel_5_SclkDid__SHIFT 0x0 #define DPM_TABLE_60__GraphicsLevel_5_ForceNbPs1_MASK 0xff00 #define DPM_TABLE_60__GraphicsLevel_5_ForceNbPs1__SHIFT 0x8 #define DPM_TABLE_60__GraphicsLevel_5_GnbSlow_MASK 0xff0000 #define DPM_TABLE_60__GraphicsLevel_5_GnbSlow__SHIFT 0x10 #define DPM_TABLE_60__GraphicsLevel_5_PowerThrottle_MASK 0xff000000 #define DPM_TABLE_60__GraphicsLevel_5_PowerThrottle__SHIFT 0x18 #define DPM_TABLE_61__GraphicsLevel_5_UpHyst_MASK 0xff #define DPM_TABLE_61__GraphicsLevel_5_UpHyst__SHIFT 0x0 #define DPM_TABLE_61__GraphicsLevel_5_EnabledForThrottle_MASK 0xff00 #define 
DPM_TABLE_61__GraphicsLevel_5_EnabledForThrottle__SHIFT 0x8 #define DPM_TABLE_61__GraphicsLevel_5_EnabledForActivity_MASK 0xff0000 #define DPM_TABLE_61__GraphicsLevel_5_EnabledForActivity__SHIFT 0x10 #define DPM_TABLE_61__GraphicsLevel_5_DisplayWatermark_MASK 0xff000000 #define DPM_TABLE_61__GraphicsLevel_5_DisplayWatermark__SHIFT 0x18 #define DPM_TABLE_62__GraphicsLevel_5_ClkBypassCntl_MASK 0xff #define DPM_TABLE_62__GraphicsLevel_5_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_62__GraphicsLevel_5_DeepSleepDivId_MASK 0xff00 #define DPM_TABLE_62__GraphicsLevel_5_DeepSleepDivId__SHIFT 0x8 #define DPM_TABLE_62__GraphicsLevel_5_VoltageDownHyst_MASK 0xff0000 #define DPM_TABLE_62__GraphicsLevel_5_VoltageDownHyst__SHIFT 0x10 #define DPM_TABLE_62__GraphicsLevel_5_DownHyst_MASK 0xff000000 #define DPM_TABLE_62__GraphicsLevel_5_DownHyst__SHIFT 0x18 #define DPM_TABLE_63__GraphicsLevel_5_reserved_MASK 0xffffffff #define DPM_TABLE_63__GraphicsLevel_5_reserved__SHIFT 0x0 #define DPM_TABLE_64__GraphicsLevel_6_MinVddNb_MASK 0xffffffff #define DPM_TABLE_64__GraphicsLevel_6_MinVddNb__SHIFT 0x0 #define DPM_TABLE_65__GraphicsLevel_6_SclkFrequency_MASK 0xffffffff #define DPM_TABLE_65__GraphicsLevel_6_SclkFrequency__SHIFT 0x0 #define DPM_TABLE_66__GraphicsLevel_6_ActivityLevel_MASK 0xffff #define DPM_TABLE_66__GraphicsLevel_6_ActivityLevel__SHIFT 0x0 #define DPM_TABLE_66__GraphicsLevel_6_VidOffset_MASK 0xff0000 #define DPM_TABLE_66__GraphicsLevel_6_VidOffset__SHIFT 0x10 #define DPM_TABLE_66__GraphicsLevel_6_Vid_MASK 0xff000000 #define DPM_TABLE_66__GraphicsLevel_6_Vid__SHIFT 0x18 #define DPM_TABLE_67__GraphicsLevel_6_SclkDid_MASK 0xff #define DPM_TABLE_67__GraphicsLevel_6_SclkDid__SHIFT 0x0 #define DPM_TABLE_67__GraphicsLevel_6_ForceNbPs1_MASK 0xff00 #define DPM_TABLE_67__GraphicsLevel_6_ForceNbPs1__SHIFT 0x8 #define DPM_TABLE_67__GraphicsLevel_6_GnbSlow_MASK 0xff0000 #define DPM_TABLE_67__GraphicsLevel_6_GnbSlow__SHIFT 0x10 #define DPM_TABLE_67__GraphicsLevel_6_PowerThrottle_MASK 0xff000000 #define DPM_TABLE_67__GraphicsLevel_6_PowerThrottle__SHIFT 0x18 #define DPM_TABLE_68__GraphicsLevel_6_UpHyst_MASK 0xff #define DPM_TABLE_68__GraphicsLevel_6_UpHyst__SHIFT 0x0 #define DPM_TABLE_68__GraphicsLevel_6_EnabledForThrottle_MASK 0xff00 #define DPM_TABLE_68__GraphicsLevel_6_EnabledForThrottle__SHIFT 0x8 #define DPM_TABLE_68__GraphicsLevel_6_EnabledForActivity_MASK 0xff0000 #define DPM_TABLE_68__GraphicsLevel_6_EnabledForActivity__SHIFT 0x10 #define DPM_TABLE_68__GraphicsLevel_6_DisplayWatermark_MASK 0xff000000 #define DPM_TABLE_68__GraphicsLevel_6_DisplayWatermark__SHIFT 0x18 #define DPM_TABLE_69__GraphicsLevel_6_ClkBypassCntl_MASK 0xff #define DPM_TABLE_69__GraphicsLevel_6_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_69__GraphicsLevel_6_DeepSleepDivId_MASK 0xff00 #define DPM_TABLE_69__GraphicsLevel_6_DeepSleepDivId__SHIFT 0x8 #define DPM_TABLE_69__GraphicsLevel_6_VoltageDownHyst_MASK 0xff0000 #define DPM_TABLE_69__GraphicsLevel_6_VoltageDownHyst__SHIFT 0x10 #define DPM_TABLE_69__GraphicsLevel_6_DownHyst_MASK 0xff000000 #define DPM_TABLE_69__GraphicsLevel_6_DownHyst__SHIFT 0x18 #define DPM_TABLE_70__GraphicsLevel_6_reserved_MASK 0xffffffff #define DPM_TABLE_70__GraphicsLevel_6_reserved__SHIFT 0x0 #define DPM_TABLE_71__GraphicsLevel_7_MinVddNb_MASK 0xffffffff #define DPM_TABLE_71__GraphicsLevel_7_MinVddNb__SHIFT 0x0 #define DPM_TABLE_72__GraphicsLevel_7_SclkFrequency_MASK 0xffffffff #define DPM_TABLE_72__GraphicsLevel_7_SclkFrequency__SHIFT 0x0 #define DPM_TABLE_73__GraphicsLevel_7_ActivityLevel_MASK 0xffff #define 
DPM_TABLE_73__GraphicsLevel_7_ActivityLevel__SHIFT 0x0 #define DPM_TABLE_73__GraphicsLevel_7_VidOffset_MASK 0xff0000 #define DPM_TABLE_73__GraphicsLevel_7_VidOffset__SHIFT 0x10 #define DPM_TABLE_73__GraphicsLevel_7_Vid_MASK 0xff000000 #define DPM_TABLE_73__GraphicsLevel_7_Vid__SHIFT 0x18 #define DPM_TABLE_74__GraphicsLevel_7_SclkDid_MASK 0xff #define DPM_TABLE_74__GraphicsLevel_7_SclkDid__SHIFT 0x0 #define DPM_TABLE_74__GraphicsLevel_7_ForceNbPs1_MASK 0xff00 #define DPM_TABLE_74__GraphicsLevel_7_ForceNbPs1__SHIFT 0x8 #define DPM_TABLE_74__GraphicsLevel_7_GnbSlow_MASK 0xff0000 #define DPM_TABLE_74__GraphicsLevel_7_GnbSlow__SHIFT 0x10 #define DPM_TABLE_74__GraphicsLevel_7_PowerThrottle_MASK 0xff000000 #define DPM_TABLE_74__GraphicsLevel_7_PowerThrottle__SHIFT 0x18 #define DPM_TABLE_75__GraphicsLevel_7_UpHyst_MASK 0xff #define DPM_TABLE_75__GraphicsLevel_7_UpHyst__SHIFT 0x0 #define DPM_TABLE_75__GraphicsLevel_7_EnabledForThrottle_MASK 0xff00 #define DPM_TABLE_75__GraphicsLevel_7_EnabledForThrottle__SHIFT 0x8 #define DPM_TABLE_75__GraphicsLevel_7_EnabledForActivity_MASK 0xff0000 #define DPM_TABLE_75__GraphicsLevel_7_EnabledForActivity__SHIFT 0x10 #define DPM_TABLE_75__GraphicsLevel_7_DisplayWatermark_MASK 0xff000000 #define DPM_TABLE_75__GraphicsLevel_7_DisplayWatermark__SHIFT 0x18 #define DPM_TABLE_76__GraphicsLevel_7_ClkBypassCntl_MASK 0xff #define DPM_TABLE_76__GraphicsLevel_7_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_76__GraphicsLevel_7_DeepSleepDivId_MASK 0xff00 #define DPM_TABLE_76__GraphicsLevel_7_DeepSleepDivId__SHIFT 0x8 #define DPM_TABLE_76__GraphicsLevel_7_VoltageDownHyst_MASK 0xff0000 #define DPM_TABLE_76__GraphicsLevel_7_VoltageDownHyst__SHIFT 0x10 #define DPM_TABLE_76__GraphicsLevel_7_DownHyst_MASK 0xff000000 #define DPM_TABLE_76__GraphicsLevel_7_DownHyst__SHIFT 0x18 #define DPM_TABLE_77__GraphicsLevel_7_reserved_MASK 0xffffffff #define DPM_TABLE_77__GraphicsLevel_7_reserved__SHIFT 0x0 #define DPM_TABLE_78__ACPILevel_Flags_MASK 0xffffffff #define DPM_TABLE_78__ACPILevel_Flags__SHIFT 0x0 #define DPM_TABLE_79__ACPILevel_MinVddNb_MASK 0xffffffff #define DPM_TABLE_79__ACPILevel_MinVddNb__SHIFT 0x0 #define DPM_TABLE_80__ACPILevel_SclkFrequency_MASK 0xffffffff #define DPM_TABLE_80__ACPILevel_SclkFrequency__SHIFT 0x0 #define DPM_TABLE_81__ACPILevel_DisplayWatermark_MASK 0xff #define DPM_TABLE_81__ACPILevel_DisplayWatermark__SHIFT 0x0 #define DPM_TABLE_81__ACPILevel_ForceNbPs1_MASK 0xff00 #define DPM_TABLE_81__ACPILevel_ForceNbPs1__SHIFT 0x8 #define DPM_TABLE_81__ACPILevel_GnbSlow_MASK 0xff0000 #define DPM_TABLE_81__ACPILevel_GnbSlow__SHIFT 0x10 #define DPM_TABLE_81__ACPILevel_SclkDid_MASK 0xff000000 #define DPM_TABLE_81__ACPILevel_SclkDid__SHIFT 0x18 #define DPM_TABLE_82__ACPILevel_padding_2_MASK 0xff #define DPM_TABLE_82__ACPILevel_padding_2__SHIFT 0x0 #define DPM_TABLE_82__ACPILevel_padding_1_MASK 0xff00 #define DPM_TABLE_82__ACPILevel_padding_1__SHIFT 0x8 #define DPM_TABLE_82__ACPILevel_padding_0_MASK 0xff0000 #define DPM_TABLE_82__ACPILevel_padding_0__SHIFT 0x10 #define DPM_TABLE_82__ACPILevel_DeepSleepDivId_MASK 0xff000000 #define DPM_TABLE_82__ACPILevel_DeepSleepDivId__SHIFT 0x18 #define DPM_TABLE_83__UvdLevel_0_VclkFrequency_MASK 0xffffffff #define DPM_TABLE_83__UvdLevel_0_VclkFrequency__SHIFT 0x0 #define DPM_TABLE_84__UvdLevel_0_DclkFrequency_MASK 0xffffffff #define DPM_TABLE_84__UvdLevel_0_DclkFrequency__SHIFT 0x0 #define DPM_TABLE_85__UvdLevel_0_DclkDivider_MASK 0xff #define DPM_TABLE_85__UvdLevel_0_DclkDivider__SHIFT 0x0 #define DPM_TABLE_85__UvdLevel_0_VclkDivider_MASK 
0xff00 #define DPM_TABLE_85__UvdLevel_0_VclkDivider__SHIFT 0x8 #define DPM_TABLE_85__UvdLevel_0_MinVddNb_MASK 0xffff0000 #define DPM_TABLE_85__UvdLevel_0_MinVddNb__SHIFT 0x10 #define DPM_TABLE_86__UvdLevel_0_padding_1_MASK 0xff #define DPM_TABLE_86__UvdLevel_0_padding_1__SHIFT 0x0 #define DPM_TABLE_86__UvdLevel_0_padding_0_MASK 0xff00 #define DPM_TABLE_86__UvdLevel_0_padding_0__SHIFT 0x8 #define DPM_TABLE_86__UvdLevel_0_DClkBypassCntl_MASK 0xff0000 #define DPM_TABLE_86__UvdLevel_0_DClkBypassCntl__SHIFT 0x10 #define DPM_TABLE_86__UvdLevel_0_VClkBypassCntl_MASK 0xff000000 #define DPM_TABLE_86__UvdLevel_0_VClkBypassCntl__SHIFT 0x18 #define DPM_TABLE_87__UvdLevel_1_VclkFrequency_MASK 0xffffffff #define DPM_TABLE_87__UvdLevel_1_VclkFrequency__SHIFT 0x0 #define DPM_TABLE_88__UvdLevel_1_DclkFrequency_MASK 0xffffffff #define DPM_TABLE_88__UvdLevel_1_DclkFrequency__SHIFT 0x0 #define DPM_TABLE_89__UvdLevel_1_DclkDivider_MASK 0xff #define DPM_TABLE_89__UvdLevel_1_DclkDivider__SHIFT 0x0 #define DPM_TABLE_89__UvdLevel_1_VclkDivider_MASK 0xff00 #define DPM_TABLE_89__UvdLevel_1_VclkDivider__SHIFT 0x8 #define DPM_TABLE_89__UvdLevel_1_MinVddNb_MASK 0xffff0000 #define DPM_TABLE_89__UvdLevel_1_MinVddNb__SHIFT 0x10 #define DPM_TABLE_90__UvdLevel_1_padding_1_MASK 0xff #define DPM_TABLE_90__UvdLevel_1_padding_1__SHIFT 0x0 #define DPM_TABLE_90__UvdLevel_1_padding_0_MASK 0xff00 #define DPM_TABLE_90__UvdLevel_1_padding_0__SHIFT 0x8 #define DPM_TABLE_90__UvdLevel_1_DClkBypassCntl_MASK 0xff0000 #define DPM_TABLE_90__UvdLevel_1_DClkBypassCntl__SHIFT 0x10 #define DPM_TABLE_90__UvdLevel_1_VClkBypassCntl_MASK 0xff000000 #define DPM_TABLE_90__UvdLevel_1_VClkBypassCntl__SHIFT 0x18 #define DPM_TABLE_91__UvdLevel_2_VclkFrequency_MASK 0xffffffff #define DPM_TABLE_91__UvdLevel_2_VclkFrequency__SHIFT 0x0 #define DPM_TABLE_92__UvdLevel_2_DclkFrequency_MASK 0xffffffff #define DPM_TABLE_92__UvdLevel_2_DclkFrequency__SHIFT 0x0 #define DPM_TABLE_93__UvdLevel_2_DclkDivider_MASK 0xff #define DPM_TABLE_93__UvdLevel_2_DclkDivider__SHIFT 0x0 #define DPM_TABLE_93__UvdLevel_2_VclkDivider_MASK 0xff00 #define DPM_TABLE_93__UvdLevel_2_VclkDivider__SHIFT 0x8 #define DPM_TABLE_93__UvdLevel_2_MinVddNb_MASK 0xffff0000 #define DPM_TABLE_93__UvdLevel_2_MinVddNb__SHIFT 0x10 #define DPM_TABLE_94__UvdLevel_2_padding_1_MASK 0xff #define DPM_TABLE_94__UvdLevel_2_padding_1__SHIFT 0x0 #define DPM_TABLE_94__UvdLevel_2_padding_0_MASK 0xff00 #define DPM_TABLE_94__UvdLevel_2_padding_0__SHIFT 0x8 #define DPM_TABLE_94__UvdLevel_2_DClkBypassCntl_MASK 0xff0000 #define DPM_TABLE_94__UvdLevel_2_DClkBypassCntl__SHIFT 0x10 #define DPM_TABLE_94__UvdLevel_2_VClkBypassCntl_MASK 0xff000000 #define DPM_TABLE_94__UvdLevel_2_VClkBypassCntl__SHIFT 0x18 #define DPM_TABLE_95__UvdLevel_3_VclkFrequency_MASK 0xffffffff #define DPM_TABLE_95__UvdLevel_3_VclkFrequency__SHIFT 0x0 #define DPM_TABLE_96__UvdLevel_3_DclkFrequency_MASK 0xffffffff #define DPM_TABLE_96__UvdLevel_3_DclkFrequency__SHIFT 0x0 #define DPM_TABLE_97__UvdLevel_3_DclkDivider_MASK 0xff #define DPM_TABLE_97__UvdLevel_3_DclkDivider__SHIFT 0x0 #define DPM_TABLE_97__UvdLevel_3_VclkDivider_MASK 0xff00 #define DPM_TABLE_97__UvdLevel_3_VclkDivider__SHIFT 0x8 #define DPM_TABLE_97__UvdLevel_3_MinVddNb_MASK 0xffff0000 #define DPM_TABLE_97__UvdLevel_3_MinVddNb__SHIFT 0x10 #define DPM_TABLE_98__UvdLevel_3_padding_1_MASK 0xff #define DPM_TABLE_98__UvdLevel_3_padding_1__SHIFT 0x0 #define DPM_TABLE_98__UvdLevel_3_padding_0_MASK 0xff00 #define DPM_TABLE_98__UvdLevel_3_padding_0__SHIFT 0x8 #define 
DPM_TABLE_98__UvdLevel_3_DClkBypassCntl_MASK 0xff0000 #define DPM_TABLE_98__UvdLevel_3_DClkBypassCntl__SHIFT 0x10 #define DPM_TABLE_98__UvdLevel_3_VClkBypassCntl_MASK 0xff000000 #define DPM_TABLE_98__UvdLevel_3_VClkBypassCntl__SHIFT 0x18 #define DPM_TABLE_99__UvdLevel_4_VclkFrequency_MASK 0xffffffff #define DPM_TABLE_99__UvdLevel_4_VclkFrequency__SHIFT 0x0 #define DPM_TABLE_100__UvdLevel_4_DclkFrequency_MASK 0xffffffff #define DPM_TABLE_100__UvdLevel_4_DclkFrequency__SHIFT 0x0 #define DPM_TABLE_101__UvdLevel_4_DclkDivider_MASK 0xff #define DPM_TABLE_101__UvdLevel_4_DclkDivider__SHIFT 0x0 #define DPM_TABLE_101__UvdLevel_4_VclkDivider_MASK 0xff00 #define DPM_TABLE_101__UvdLevel_4_VclkDivider__SHIFT 0x8 #define DPM_TABLE_101__UvdLevel_4_MinVddNb_MASK 0xffff0000 #define DPM_TABLE_101__UvdLevel_4_MinVddNb__SHIFT 0x10 #define DPM_TABLE_102__UvdLevel_4_padding_1_MASK 0xff #define DPM_TABLE_102__UvdLevel_4_padding_1__SHIFT 0x0 #define DPM_TABLE_102__UvdLevel_4_padding_0_MASK 0xff00 #define DPM_TABLE_102__UvdLevel_4_padding_0__SHIFT 0x8 #define DPM_TABLE_102__UvdLevel_4_DClkBypassCntl_MASK 0xff0000 #define DPM_TABLE_102__UvdLevel_4_DClkBypassCntl__SHIFT 0x10 #define DPM_TABLE_102__UvdLevel_4_VClkBypassCntl_MASK 0xff000000 #define DPM_TABLE_102__UvdLevel_4_VClkBypassCntl__SHIFT 0x18 #define DPM_TABLE_103__UvdLevel_5_VclkFrequency_MASK 0xffffffff #define DPM_TABLE_103__UvdLevel_5_VclkFrequency__SHIFT 0x0 #define DPM_TABLE_104__UvdLevel_5_DclkFrequency_MASK 0xffffffff #define DPM_TABLE_104__UvdLevel_5_DclkFrequency__SHIFT 0x0 #define DPM_TABLE_105__UvdLevel_5_DclkDivider_MASK 0xff #define DPM_TABLE_105__UvdLevel_5_DclkDivider__SHIFT 0x0 #define DPM_TABLE_105__UvdLevel_5_VclkDivider_MASK 0xff00 #define DPM_TABLE_105__UvdLevel_5_VclkDivider__SHIFT 0x8 #define DPM_TABLE_105__UvdLevel_5_MinVddNb_MASK 0xffff0000 #define DPM_TABLE_105__UvdLevel_5_MinVddNb__SHIFT 0x10 #define DPM_TABLE_106__UvdLevel_5_padding_1_MASK 0xff #define DPM_TABLE_106__UvdLevel_5_padding_1__SHIFT 0x0 #define DPM_TABLE_106__UvdLevel_5_padding_0_MASK 0xff00 #define DPM_TABLE_106__UvdLevel_5_padding_0__SHIFT 0x8 #define DPM_TABLE_106__UvdLevel_5_DClkBypassCntl_MASK 0xff0000 #define DPM_TABLE_106__UvdLevel_5_DClkBypassCntl__SHIFT 0x10 #define DPM_TABLE_106__UvdLevel_5_VClkBypassCntl_MASK 0xff000000 #define DPM_TABLE_106__UvdLevel_5_VClkBypassCntl__SHIFT 0x18 #define DPM_TABLE_107__UvdLevel_6_VclkFrequency_MASK 0xffffffff #define DPM_TABLE_107__UvdLevel_6_VclkFrequency__SHIFT 0x0 #define DPM_TABLE_108__UvdLevel_6_DclkFrequency_MASK 0xffffffff #define DPM_TABLE_108__UvdLevel_6_DclkFrequency__SHIFT 0x0 #define DPM_TABLE_109__UvdLevel_6_DclkDivider_MASK 0xff #define DPM_TABLE_109__UvdLevel_6_DclkDivider__SHIFT 0x0 #define DPM_TABLE_109__UvdLevel_6_VclkDivider_MASK 0xff00 #define DPM_TABLE_109__UvdLevel_6_VclkDivider__SHIFT 0x8 #define DPM_TABLE_109__UvdLevel_6_MinVddNb_MASK 0xffff0000 #define DPM_TABLE_109__UvdLevel_6_MinVddNb__SHIFT 0x10 #define DPM_TABLE_110__UvdLevel_6_padding_1_MASK 0xff #define DPM_TABLE_110__UvdLevel_6_padding_1__SHIFT 0x0 #define DPM_TABLE_110__UvdLevel_6_padding_0_MASK 0xff00 #define DPM_TABLE_110__UvdLevel_6_padding_0__SHIFT 0x8 #define DPM_TABLE_110__UvdLevel_6_DClkBypassCntl_MASK 0xff0000 #define DPM_TABLE_110__UvdLevel_6_DClkBypassCntl__SHIFT 0x10 #define DPM_TABLE_110__UvdLevel_6_VClkBypassCntl_MASK 0xff000000 #define DPM_TABLE_110__UvdLevel_6_VClkBypassCntl__SHIFT 0x18 #define DPM_TABLE_111__UvdLevel_7_VclkFrequency_MASK 0xffffffff #define DPM_TABLE_111__UvdLevel_7_VclkFrequency__SHIFT 0x0 #define 
DPM_TABLE_112__UvdLevel_7_DclkFrequency_MASK 0xffffffff #define DPM_TABLE_112__UvdLevel_7_DclkFrequency__SHIFT 0x0 #define DPM_TABLE_113__UvdLevel_7_DclkDivider_MASK 0xff #define DPM_TABLE_113__UvdLevel_7_DclkDivider__SHIFT 0x0 #define DPM_TABLE_113__UvdLevel_7_VclkDivider_MASK 0xff00 #define DPM_TABLE_113__UvdLevel_7_VclkDivider__SHIFT 0x8 #define DPM_TABLE_113__UvdLevel_7_MinVddNb_MASK 0xffff0000 #define DPM_TABLE_113__UvdLevel_7_MinVddNb__SHIFT 0x10 #define DPM_TABLE_114__UvdLevel_7_padding_1_MASK 0xff #define DPM_TABLE_114__UvdLevel_7_padding_1__SHIFT 0x0 #define DPM_TABLE_114__UvdLevel_7_padding_0_MASK 0xff00 #define DPM_TABLE_114__UvdLevel_7_padding_0__SHIFT 0x8 #define DPM_TABLE_114__UvdLevel_7_DClkBypassCntl_MASK 0xff0000 #define DPM_TABLE_114__UvdLevel_7_DClkBypassCntl__SHIFT 0x10 #define DPM_TABLE_114__UvdLevel_7_VClkBypassCntl_MASK 0xff000000 #define DPM_TABLE_114__UvdLevel_7_VClkBypassCntl__SHIFT 0x18 #define DPM_TABLE_115__VceLevel_0_Frequency_MASK 0xffffffff #define DPM_TABLE_115__VceLevel_0_Frequency__SHIFT 0x0 #define DPM_TABLE_116__VceLevel_0_ClkBypassCntl_MASK 0xff #define DPM_TABLE_116__VceLevel_0_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_116__VceLevel_0_Divider_MASK 0xff00 #define DPM_TABLE_116__VceLevel_0_Divider__SHIFT 0x8 #define DPM_TABLE_116__VceLevel_0_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_116__VceLevel_0_MinVoltage__SHIFT 0x10 #define DPM_TABLE_117__VceLevel_0_Reserved_MASK 0xffffffff #define DPM_TABLE_117__VceLevel_0_Reserved__SHIFT 0x0 #define DPM_TABLE_118__VceLevel_1_Frequency_MASK 0xffffffff #define DPM_TABLE_118__VceLevel_1_Frequency__SHIFT 0x0 #define DPM_TABLE_119__VceLevel_1_ClkBypassCntl_MASK 0xff #define DPM_TABLE_119__VceLevel_1_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_119__VceLevel_1_Divider_MASK 0xff00 #define DPM_TABLE_119__VceLevel_1_Divider__SHIFT 0x8 #define DPM_TABLE_119__VceLevel_1_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_119__VceLevel_1_MinVoltage__SHIFT 0x10 #define DPM_TABLE_120__VceLevel_1_Reserved_MASK 0xffffffff #define DPM_TABLE_120__VceLevel_1_Reserved__SHIFT 0x0 #define DPM_TABLE_121__VceLevel_2_Frequency_MASK 0xffffffff #define DPM_TABLE_121__VceLevel_2_Frequency__SHIFT 0x0 #define DPM_TABLE_122__VceLevel_2_ClkBypassCntl_MASK 0xff #define DPM_TABLE_122__VceLevel_2_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_122__VceLevel_2_Divider_MASK 0xff00 #define DPM_TABLE_122__VceLevel_2_Divider__SHIFT 0x8 #define DPM_TABLE_122__VceLevel_2_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_122__VceLevel_2_MinVoltage__SHIFT 0x10 #define DPM_TABLE_123__VceLevel_2_Reserved_MASK 0xffffffff #define DPM_TABLE_123__VceLevel_2_Reserved__SHIFT 0x0 #define DPM_TABLE_124__VceLevel_3_Frequency_MASK 0xffffffff #define DPM_TABLE_124__VceLevel_3_Frequency__SHIFT 0x0 #define DPM_TABLE_125__VceLevel_3_ClkBypassCntl_MASK 0xff #define DPM_TABLE_125__VceLevel_3_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_125__VceLevel_3_Divider_MASK 0xff00 #define DPM_TABLE_125__VceLevel_3_Divider__SHIFT 0x8 #define DPM_TABLE_125__VceLevel_3_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_125__VceLevel_3_MinVoltage__SHIFT 0x10 #define DPM_TABLE_126__VceLevel_3_Reserved_MASK 0xffffffff #define DPM_TABLE_126__VceLevel_3_Reserved__SHIFT 0x0 #define DPM_TABLE_127__VceLevel_4_Frequency_MASK 0xffffffff #define DPM_TABLE_127__VceLevel_4_Frequency__SHIFT 0x0 #define DPM_TABLE_128__VceLevel_4_ClkBypassCntl_MASK 0xff #define DPM_TABLE_128__VceLevel_4_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_128__VceLevel_4_Divider_MASK 0xff00 #define DPM_TABLE_128__VceLevel_4_Divider__SHIFT 0x8 #define 
DPM_TABLE_128__VceLevel_4_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_128__VceLevel_4_MinVoltage__SHIFT 0x10 #define DPM_TABLE_129__VceLevel_4_Reserved_MASK 0xffffffff #define DPM_TABLE_129__VceLevel_4_Reserved__SHIFT 0x0 #define DPM_TABLE_130__VceLevel_5_Frequency_MASK 0xffffffff #define DPM_TABLE_130__VceLevel_5_Frequency__SHIFT 0x0 #define DPM_TABLE_131__VceLevel_5_ClkBypassCntl_MASK 0xff #define DPM_TABLE_131__VceLevel_5_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_131__VceLevel_5_Divider_MASK 0xff00 #define DPM_TABLE_131__VceLevel_5_Divider__SHIFT 0x8 #define DPM_TABLE_131__VceLevel_5_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_131__VceLevel_5_MinVoltage__SHIFT 0x10 #define DPM_TABLE_132__VceLevel_5_Reserved_MASK 0xffffffff #define DPM_TABLE_132__VceLevel_5_Reserved__SHIFT 0x0 #define DPM_TABLE_133__VceLevel_6_Frequency_MASK 0xffffffff #define DPM_TABLE_133__VceLevel_6_Frequency__SHIFT 0x0 #define DPM_TABLE_134__VceLevel_6_ClkBypassCntl_MASK 0xff #define DPM_TABLE_134__VceLevel_6_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_134__VceLevel_6_Divider_MASK 0xff00 #define DPM_TABLE_134__VceLevel_6_Divider__SHIFT 0x8 #define DPM_TABLE_134__VceLevel_6_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_134__VceLevel_6_MinVoltage__SHIFT 0x10 #define DPM_TABLE_135__VceLevel_6_Reserved_MASK 0xffffffff #define DPM_TABLE_135__VceLevel_6_Reserved__SHIFT 0x0 #define DPM_TABLE_136__VceLevel_7_Frequency_MASK 0xffffffff #define DPM_TABLE_136__VceLevel_7_Frequency__SHIFT 0x0 #define DPM_TABLE_137__VceLevel_7_ClkBypassCntl_MASK 0xff #define DPM_TABLE_137__VceLevel_7_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_137__VceLevel_7_Divider_MASK 0xff00 #define DPM_TABLE_137__VceLevel_7_Divider__SHIFT 0x8 #define DPM_TABLE_137__VceLevel_7_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_137__VceLevel_7_MinVoltage__SHIFT 0x10 #define DPM_TABLE_138__VceLevel_7_Reserved_MASK 0xffffffff #define DPM_TABLE_138__VceLevel_7_Reserved__SHIFT 0x0 #define DPM_TABLE_139__AcpLevel_0_Frequency_MASK 0xffffffff #define DPM_TABLE_139__AcpLevel_0_Frequency__SHIFT 0x0 #define DPM_TABLE_140__AcpLevel_0_ClkBypassCntl_MASK 0xff #define DPM_TABLE_140__AcpLevel_0_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_140__AcpLevel_0_Divider_MASK 0xff00 #define DPM_TABLE_140__AcpLevel_0_Divider__SHIFT 0x8 #define DPM_TABLE_140__AcpLevel_0_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_140__AcpLevel_0_MinVoltage__SHIFT 0x10 #define DPM_TABLE_141__AcpLevel_0_Reserved_MASK 0xffffffff #define DPM_TABLE_141__AcpLevel_0_Reserved__SHIFT 0x0 #define DPM_TABLE_142__AcpLevel_1_Frequency_MASK 0xffffffff #define DPM_TABLE_142__AcpLevel_1_Frequency__SHIFT 0x0 #define DPM_TABLE_143__AcpLevel_1_ClkBypassCntl_MASK 0xff #define DPM_TABLE_143__AcpLevel_1_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_143__AcpLevel_1_Divider_MASK 0xff00 #define DPM_TABLE_143__AcpLevel_1_Divider__SHIFT 0x8 #define DPM_TABLE_143__AcpLevel_1_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_143__AcpLevel_1_MinVoltage__SHIFT 0x10 #define DPM_TABLE_144__AcpLevel_1_Reserved_MASK 0xffffffff #define DPM_TABLE_144__AcpLevel_1_Reserved__SHIFT 0x0 #define DPM_TABLE_145__AcpLevel_2_Frequency_MASK 0xffffffff #define DPM_TABLE_145__AcpLevel_2_Frequency__SHIFT 0x0 #define DPM_TABLE_146__AcpLevel_2_ClkBypassCntl_MASK 0xff #define DPM_TABLE_146__AcpLevel_2_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_146__AcpLevel_2_Divider_MASK 0xff00 #define DPM_TABLE_146__AcpLevel_2_Divider__SHIFT 0x8 #define DPM_TABLE_146__AcpLevel_2_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_146__AcpLevel_2_MinVoltage__SHIFT 0x10 #define 
DPM_TABLE_147__AcpLevel_2_Reserved_MASK 0xffffffff #define DPM_TABLE_147__AcpLevel_2_Reserved__SHIFT 0x0 #define DPM_TABLE_148__AcpLevel_3_Frequency_MASK 0xffffffff #define DPM_TABLE_148__AcpLevel_3_Frequency__SHIFT 0x0 #define DPM_TABLE_149__AcpLevel_3_ClkBypassCntl_MASK 0xff #define DPM_TABLE_149__AcpLevel_3_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_149__AcpLevel_3_Divider_MASK 0xff00 #define DPM_TABLE_149__AcpLevel_3_Divider__SHIFT 0x8 #define DPM_TABLE_149__AcpLevel_3_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_149__AcpLevel_3_MinVoltage__SHIFT 0x10 #define DPM_TABLE_150__AcpLevel_3_Reserved_MASK 0xffffffff #define DPM_TABLE_150__AcpLevel_3_Reserved__SHIFT 0x0 #define DPM_TABLE_151__AcpLevel_4_Frequency_MASK 0xffffffff #define DPM_TABLE_151__AcpLevel_4_Frequency__SHIFT 0x0 #define DPM_TABLE_152__AcpLevel_4_ClkBypassCntl_MASK 0xff #define DPM_TABLE_152__AcpLevel_4_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_152__AcpLevel_4_Divider_MASK 0xff00 #define DPM_TABLE_152__AcpLevel_4_Divider__SHIFT 0x8 #define DPM_TABLE_152__AcpLevel_4_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_152__AcpLevel_4_MinVoltage__SHIFT 0x10 #define DPM_TABLE_153__AcpLevel_4_Reserved_MASK 0xffffffff #define DPM_TABLE_153__AcpLevel_4_Reserved__SHIFT 0x0 #define DPM_TABLE_154__AcpLevel_5_Frequency_MASK 0xffffffff #define DPM_TABLE_154__AcpLevel_5_Frequency__SHIFT 0x0 #define DPM_TABLE_155__AcpLevel_5_ClkBypassCntl_MASK 0xff #define DPM_TABLE_155__AcpLevel_5_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_155__AcpLevel_5_Divider_MASK 0xff00 #define DPM_TABLE_155__AcpLevel_5_Divider__SHIFT 0x8 #define DPM_TABLE_155__AcpLevel_5_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_155__AcpLevel_5_MinVoltage__SHIFT 0x10 #define DPM_TABLE_156__AcpLevel_5_Reserved_MASK 0xffffffff #define DPM_TABLE_156__AcpLevel_5_Reserved__SHIFT 0x0 #define DPM_TABLE_157__AcpLevel_6_Frequency_MASK 0xffffffff #define DPM_TABLE_157__AcpLevel_6_Frequency__SHIFT 0x0 #define DPM_TABLE_158__AcpLevel_6_ClkBypassCntl_MASK 0xff #define DPM_TABLE_158__AcpLevel_6_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_158__AcpLevel_6_Divider_MASK 0xff00 #define DPM_TABLE_158__AcpLevel_6_Divider__SHIFT 0x8 #define DPM_TABLE_158__AcpLevel_6_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_158__AcpLevel_6_MinVoltage__SHIFT 0x10 #define DPM_TABLE_159__AcpLevel_6_Reserved_MASK 0xffffffff #define DPM_TABLE_159__AcpLevel_6_Reserved__SHIFT 0x0 #define DPM_TABLE_160__AcpLevel_7_Frequency_MASK 0xffffffff #define DPM_TABLE_160__AcpLevel_7_Frequency__SHIFT 0x0 #define DPM_TABLE_161__AcpLevel_7_ClkBypassCntl_MASK 0xff #define DPM_TABLE_161__AcpLevel_7_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_161__AcpLevel_7_Divider_MASK 0xff00 #define DPM_TABLE_161__AcpLevel_7_Divider__SHIFT 0x8 #define DPM_TABLE_161__AcpLevel_7_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_161__AcpLevel_7_MinVoltage__SHIFT 0x10 #define DPM_TABLE_162__AcpLevel_7_Reserved_MASK 0xffffffff #define DPM_TABLE_162__AcpLevel_7_Reserved__SHIFT 0x0 #define DPM_TABLE_163__SamuLevel_0_Frequency_MASK 0xffffffff #define DPM_TABLE_163__SamuLevel_0_Frequency__SHIFT 0x0 #define DPM_TABLE_164__SamuLevel_0_ClkBypassCntl_MASK 0xff #define DPM_TABLE_164__SamuLevel_0_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_164__SamuLevel_0_Divider_MASK 0xff00 #define DPM_TABLE_164__SamuLevel_0_Divider__SHIFT 0x8 #define DPM_TABLE_164__SamuLevel_0_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_164__SamuLevel_0_MinVoltage__SHIFT 0x10 #define DPM_TABLE_165__SamuLevel_0_Reserved_MASK 0xffffffff #define DPM_TABLE_165__SamuLevel_0_Reserved__SHIFT 0x0 #define 
DPM_TABLE_166__SamuLevel_1_Frequency_MASK 0xffffffff #define DPM_TABLE_166__SamuLevel_1_Frequency__SHIFT 0x0 #define DPM_TABLE_167__SamuLevel_1_ClkBypassCntl_MASK 0xff #define DPM_TABLE_167__SamuLevel_1_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_167__SamuLevel_1_Divider_MASK 0xff00 #define DPM_TABLE_167__SamuLevel_1_Divider__SHIFT 0x8 #define DPM_TABLE_167__SamuLevel_1_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_167__SamuLevel_1_MinVoltage__SHIFT 0x10 #define DPM_TABLE_168__SamuLevel_1_Reserved_MASK 0xffffffff #define DPM_TABLE_168__SamuLevel_1_Reserved__SHIFT 0x0 #define DPM_TABLE_169__SamuLevel_2_Frequency_MASK 0xffffffff #define DPM_TABLE_169__SamuLevel_2_Frequency__SHIFT 0x0 #define DPM_TABLE_170__SamuLevel_2_ClkBypassCntl_MASK 0xff #define DPM_TABLE_170__SamuLevel_2_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_170__SamuLevel_2_Divider_MASK 0xff00 #define DPM_TABLE_170__SamuLevel_2_Divider__SHIFT 0x8 #define DPM_TABLE_170__SamuLevel_2_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_170__SamuLevel_2_MinVoltage__SHIFT 0x10 #define DPM_TABLE_171__SamuLevel_2_Reserved_MASK 0xffffffff #define DPM_TABLE_171__SamuLevel_2_Reserved__SHIFT 0x0 #define DPM_TABLE_172__SamuLevel_3_Frequency_MASK 0xffffffff #define DPM_TABLE_172__SamuLevel_3_Frequency__SHIFT 0x0 #define DPM_TABLE_173__SamuLevel_3_ClkBypassCntl_MASK 0xff #define DPM_TABLE_173__SamuLevel_3_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_173__SamuLevel_3_Divider_MASK 0xff00 #define DPM_TABLE_173__SamuLevel_3_Divider__SHIFT 0x8 #define DPM_TABLE_173__SamuLevel_3_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_173__SamuLevel_3_MinVoltage__SHIFT 0x10 #define DPM_TABLE_174__SamuLevel_3_Reserved_MASK 0xffffffff #define DPM_TABLE_174__SamuLevel_3_Reserved__SHIFT 0x0 #define DPM_TABLE_175__SamuLevel_4_Frequency_MASK 0xffffffff #define DPM_TABLE_175__SamuLevel_4_Frequency__SHIFT 0x0 #define DPM_TABLE_176__SamuLevel_4_ClkBypassCntl_MASK 0xff #define DPM_TABLE_176__SamuLevel_4_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_176__SamuLevel_4_Divider_MASK 0xff00 #define DPM_TABLE_176__SamuLevel_4_Divider__SHIFT 0x8 #define DPM_TABLE_176__SamuLevel_4_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_176__SamuLevel_4_MinVoltage__SHIFT 0x10 #define DPM_TABLE_177__SamuLevel_4_Reserved_MASK 0xffffffff #define DPM_TABLE_177__SamuLevel_4_Reserved__SHIFT 0x0 #define DPM_TABLE_178__SamuLevel_5_Frequency_MASK 0xffffffff #define DPM_TABLE_178__SamuLevel_5_Frequency__SHIFT 0x0 #define DPM_TABLE_179__SamuLevel_5_ClkBypassCntl_MASK 0xff #define DPM_TABLE_179__SamuLevel_5_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_179__SamuLevel_5_Divider_MASK 0xff00 #define DPM_TABLE_179__SamuLevel_5_Divider__SHIFT 0x8 #define DPM_TABLE_179__SamuLevel_5_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_179__SamuLevel_5_MinVoltage__SHIFT 0x10 #define DPM_TABLE_180__SamuLevel_5_Reserved_MASK 0xffffffff #define DPM_TABLE_180__SamuLevel_5_Reserved__SHIFT 0x0 #define DPM_TABLE_181__SamuLevel_6_Frequency_MASK 0xffffffff #define DPM_TABLE_181__SamuLevel_6_Frequency__SHIFT 0x0 #define DPM_TABLE_182__SamuLevel_6_ClkBypassCntl_MASK 0xff #define DPM_TABLE_182__SamuLevel_6_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_182__SamuLevel_6_Divider_MASK 0xff00 #define DPM_TABLE_182__SamuLevel_6_Divider__SHIFT 0x8 #define DPM_TABLE_182__SamuLevel_6_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_182__SamuLevel_6_MinVoltage__SHIFT 0x10 #define DPM_TABLE_183__SamuLevel_6_Reserved_MASK 0xffffffff #define DPM_TABLE_183__SamuLevel_6_Reserved__SHIFT 0x0 #define DPM_TABLE_184__SamuLevel_7_Frequency_MASK 0xffffffff #define 
DPM_TABLE_184__SamuLevel_7_Frequency__SHIFT 0x0 #define DPM_TABLE_185__SamuLevel_7_ClkBypassCntl_MASK 0xff #define DPM_TABLE_185__SamuLevel_7_ClkBypassCntl__SHIFT 0x0 #define DPM_TABLE_185__SamuLevel_7_Divider_MASK 0xff00 #define DPM_TABLE_185__SamuLevel_7_Divider__SHIFT 0x8 #define DPM_TABLE_185__SamuLevel_7_MinVoltage_MASK 0xffff0000 #define DPM_TABLE_185__SamuLevel_7_MinVoltage__SHIFT 0x10 #define DPM_TABLE_186__SamuLevel_7_Reserved_MASK 0xffffffff #define DPM_TABLE_186__SamuLevel_7_Reserved__SHIFT 0x0 #define DPM_TABLE_187__SamuBootLevel_MASK 0xff #define DPM_TABLE_187__SamuBootLevel__SHIFT 0x0 #define DPM_TABLE_187__AcpBootLevel_MASK 0xff00 #define DPM_TABLE_187__AcpBootLevel__SHIFT 0x8 #define DPM_TABLE_187__VceBootLevel_MASK 0xff0000 #define DPM_TABLE_187__VceBootLevel__SHIFT 0x10 #define DPM_TABLE_187__UvdBootLevel_MASK 0xff000000 #define DPM_TABLE_187__UvdBootLevel__SHIFT 0x18 #define DPM_TABLE_188__SAMUInterval_MASK 0xff #define DPM_TABLE_188__SAMUInterval__SHIFT 0x0 #define DPM_TABLE_188__ACPInterval_MASK 0xff00 #define DPM_TABLE_188__ACPInterval__SHIFT 0x8 #define DPM_TABLE_188__VCEInterval_MASK 0xff0000 #define DPM_TABLE_188__VCEInterval__SHIFT 0x10 #define DPM_TABLE_188__UVDInterval_MASK 0xff000000 #define DPM_TABLE_188__UVDInterval__SHIFT 0x18 #define DPM_TABLE_189__GraphicsVoltageChangeEnable_MASK 0xff #define DPM_TABLE_189__GraphicsVoltageChangeEnable__SHIFT 0x0 #define DPM_TABLE_189__GraphicsThermThrottleEnable_MASK 0xff00 #define DPM_TABLE_189__GraphicsThermThrottleEnable__SHIFT 0x8 #define DPM_TABLE_189__GraphicsInterval_MASK 0xff0000 #define DPM_TABLE_189__GraphicsInterval__SHIFT 0x10 #define DPM_TABLE_189__GraphicsBootLevel_MASK 0xff000000 #define DPM_TABLE_189__GraphicsBootLevel__SHIFT 0x18 #define DPM_TABLE_190__FpsLowThreshold_MASK 0xffff #define DPM_TABLE_190__FpsLowThreshold__SHIFT 0x0 #define DPM_TABLE_190__GraphicsClkSlowDivider_MASK 0xff0000 #define DPM_TABLE_190__GraphicsClkSlowDivider__SHIFT 0x10 #define DPM_TABLE_190__GraphicsClkSlowEnable_MASK 0xff000000 #define DPM_TABLE_190__GraphicsClkSlowEnable__SHIFT 0x18 #define DPM_TABLE_191__DisplayCac_MASK 0xffffffff #define DPM_TABLE_191__DisplayCac__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_1__RefClockFrequency_MASK 0xffffffff #define SOFT_REGISTERS_TABLE_1__RefClockFrequency__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_2__PmTimerPeriod_MASK 0xffffffff #define SOFT_REGISTERS_TABLE_2__PmTimerPeriod__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_3__FeatureEnables_MASK 0xffffffff #define SOFT_REGISTERS_TABLE_3__FeatureEnables__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_4__HandshakeDisables_MASK 0xffffffff #define SOFT_REGISTERS_TABLE_4__HandshakeDisables__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_5__DisplayPhy4Config_MASK 0xff #define SOFT_REGISTERS_TABLE_5__DisplayPhy4Config__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_5__DisplayPhy3Config_MASK 0xff00 #define SOFT_REGISTERS_TABLE_5__DisplayPhy3Config__SHIFT 0x8 #define SOFT_REGISTERS_TABLE_5__DisplayPhy2Config_MASK 0xff0000 #define SOFT_REGISTERS_TABLE_5__DisplayPhy2Config__SHIFT 0x10 #define SOFT_REGISTERS_TABLE_5__DisplayPhy1Config_MASK 0xff000000 #define SOFT_REGISTERS_TABLE_5__DisplayPhy1Config__SHIFT 0x18 #define SOFT_REGISTERS_TABLE_6__DisplayPhy8Config_MASK 0xff #define SOFT_REGISTERS_TABLE_6__DisplayPhy8Config__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_6__DisplayPhy7Config_MASK 0xff00 #define SOFT_REGISTERS_TABLE_6__DisplayPhy7Config__SHIFT 0x8 #define SOFT_REGISTERS_TABLE_6__DisplayPhy6Config_MASK 0xff0000 #define SOFT_REGISTERS_TABLE_6__DisplayPhy6Config__SHIFT 0x10 #define 
SOFT_REGISTERS_TABLE_6__DisplayPhy5Config_MASK 0xff000000 #define SOFT_REGISTERS_TABLE_6__DisplayPhy5Config__SHIFT 0x18 #define SOFT_REGISTERS_TABLE_7__AverageGraphicsActivity_MASK 0xffffffff #define SOFT_REGISTERS_TABLE_7__AverageGraphicsActivity__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_8__AverageMemoryActivity_MASK 0xffffffff #define SOFT_REGISTERS_TABLE_8__AverageMemoryActivity__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_9__AverageGioActivity_MASK 0xffffffff #define SOFT_REGISTERS_TABLE_9__AverageGioActivity__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_10__PCIeDpmEnabledLevels_MASK 0xff #define SOFT_REGISTERS_TABLE_10__PCIeDpmEnabledLevels__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_10__LClkDpmEnabledLevels_MASK 0xff00 #define SOFT_REGISTERS_TABLE_10__LClkDpmEnabledLevels__SHIFT 0x8 #define SOFT_REGISTERS_TABLE_10__MClkDpmEnabledLevels_MASK 0xff0000 #define SOFT_REGISTERS_TABLE_10__MClkDpmEnabledLevels__SHIFT 0x10 #define SOFT_REGISTERS_TABLE_10__SClkDpmEnabledLevels_MASK 0xff000000 #define SOFT_REGISTERS_TABLE_10__SClkDpmEnabledLevels__SHIFT 0x18 #define SOFT_REGISTERS_TABLE_11__VCEDpmEnabledLevels_MASK 0xff #define SOFT_REGISTERS_TABLE_11__VCEDpmEnabledLevels__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_11__ACPDpmEnabledLevels_MASK 0xff00 #define SOFT_REGISTERS_TABLE_11__ACPDpmEnabledLevels__SHIFT 0x8 #define SOFT_REGISTERS_TABLE_11__SAMUDpmEnabledLevels_MASK 0xff0000 #define SOFT_REGISTERS_TABLE_11__SAMUDpmEnabledLevels__SHIFT 0x10 #define SOFT_REGISTERS_TABLE_11__UVDDpmEnabledLevels_MASK 0xff000000 #define SOFT_REGISTERS_TABLE_11__UVDDpmEnabledLevels__SHIFT 0x18 #define SOFT_REGISTERS_TABLE_12__Reserved_0_MASK 0xffffffff #define SOFT_REGISTERS_TABLE_12__Reserved_0__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_13__Reserved_1_MASK 0xffffffff #define SOFT_REGISTERS_TABLE_13__Reserved_1__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_14__Reserved_2_MASK 0xffffffff #define SOFT_REGISTERS_TABLE_14__Reserved_2__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_15__Reserved_3_MASK 0xffffffff #define SOFT_REGISTERS_TABLE_15__Reserved_3__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_16__Reserved_4_MASK 0xffffffff #define SOFT_REGISTERS_TABLE_16__Reserved_4__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_17__Reserved_5_MASK 0xffffffff #define SOFT_REGISTERS_TABLE_17__Reserved_5__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_18__Reserved_6_MASK 0xffffffff #define SOFT_REGISTERS_TABLE_18__Reserved_6__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_19__Reserved_7_MASK 0xffffffff #define SOFT_REGISTERS_TABLE_19__Reserved_7__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_20__Reserved_8_MASK 0xffffffff #define SOFT_REGISTERS_TABLE_20__Reserved_8__SHIFT 0x0 #define SOFT_REGISTERS_TABLE_21__Reserved_9_MASK 0xffffffff #define SOFT_REGISTERS_TABLE_21__Reserved_9__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_0_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD_MASK 0xff #define SMU_LCLK_DPM_STATE_0_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_0_CNTL_0__VID_MASK 0xff00 #define SMU_LCLK_DPM_STATE_0_CNTL_0__VID__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_0_CNTL_0__CLK_DIVIDER_MASK 0xff0000 #define SMU_LCLK_DPM_STATE_0_CNTL_0__CLK_DIVIDER__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_0_CNTL_0__STATE_VALID_MASK 0xff000000 #define SMU_LCLK_DPM_STATE_0_CNTL_0__STATE_VALID__SHIFT 0x18 #define SMU_LCLK_DPM_STATE_1_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD_MASK 0xff #define SMU_LCLK_DPM_STATE_1_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_1_CNTL_0__VID_MASK 0xff00 #define SMU_LCLK_DPM_STATE_1_CNTL_0__VID__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_1_CNTL_0__CLK_DIVIDER_MASK 0xff0000 #define 
SMU_LCLK_DPM_STATE_1_CNTL_0__CLK_DIVIDER__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_1_CNTL_0__STATE_VALID_MASK 0xff000000 #define SMU_LCLK_DPM_STATE_1_CNTL_0__STATE_VALID__SHIFT 0x18 #define SMU_LCLK_DPM_STATE_2_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD_MASK 0xff #define SMU_LCLK_DPM_STATE_2_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_2_CNTL_0__VID_MASK 0xff00 #define SMU_LCLK_DPM_STATE_2_CNTL_0__VID__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_2_CNTL_0__CLK_DIVIDER_MASK 0xff0000 #define SMU_LCLK_DPM_STATE_2_CNTL_0__CLK_DIVIDER__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_2_CNTL_0__STATE_VALID_MASK 0xff000000 #define SMU_LCLK_DPM_STATE_2_CNTL_0__STATE_VALID__SHIFT 0x18 #define SMU_LCLK_DPM_STATE_3_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD_MASK 0xff #define SMU_LCLK_DPM_STATE_3_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_3_CNTL_0__VID_MASK 0xff00 #define SMU_LCLK_DPM_STATE_3_CNTL_0__VID__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_3_CNTL_0__CLK_DIVIDER_MASK 0xff0000 #define SMU_LCLK_DPM_STATE_3_CNTL_0__CLK_DIVIDER__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_3_CNTL_0__STATE_VALID_MASK 0xff000000 #define SMU_LCLK_DPM_STATE_3_CNTL_0__STATE_VALID__SHIFT 0x18 #define SMU_LCLK_DPM_STATE_4_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD_MASK 0xff #define SMU_LCLK_DPM_STATE_4_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_4_CNTL_0__VID_MASK 0xff00 #define SMU_LCLK_DPM_STATE_4_CNTL_0__VID__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_4_CNTL_0__CLK_DIVIDER_MASK 0xff0000 #define SMU_LCLK_DPM_STATE_4_CNTL_0__CLK_DIVIDER__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_4_CNTL_0__STATE_VALID_MASK 0xff000000 #define SMU_LCLK_DPM_STATE_4_CNTL_0__STATE_VALID__SHIFT 0x18 #define SMU_LCLK_DPM_STATE_5_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD_MASK 0xff #define SMU_LCLK_DPM_STATE_5_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_5_CNTL_0__VID_MASK 0xff00 #define SMU_LCLK_DPM_STATE_5_CNTL_0__VID__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_5_CNTL_0__CLK_DIVIDER_MASK 0xff0000 #define SMU_LCLK_DPM_STATE_5_CNTL_0__CLK_DIVIDER__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_5_CNTL_0__STATE_VALID_MASK 0xff000000 #define SMU_LCLK_DPM_STATE_5_CNTL_0__STATE_VALID__SHIFT 0x18 #define SMU_LCLK_DPM_STATE_6_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD_MASK 0xff #define SMU_LCLK_DPM_STATE_6_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_6_CNTL_0__VID_MASK 0xff00 #define SMU_LCLK_DPM_STATE_6_CNTL_0__VID__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_6_CNTL_0__CLK_DIVIDER_MASK 0xff0000 #define SMU_LCLK_DPM_STATE_6_CNTL_0__CLK_DIVIDER__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_6_CNTL_0__STATE_VALID_MASK 0xff000000 #define SMU_LCLK_DPM_STATE_6_CNTL_0__STATE_VALID__SHIFT 0x18 #define SMU_LCLK_DPM_STATE_7_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD_MASK 0xff #define SMU_LCLK_DPM_STATE_7_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_7_CNTL_0__VID_MASK 0xff00 #define SMU_LCLK_DPM_STATE_7_CNTL_0__VID__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_7_CNTL_0__CLK_DIVIDER_MASK 0xff0000 #define SMU_LCLK_DPM_STATE_7_CNTL_0__CLK_DIVIDER__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_7_CNTL_0__STATE_VALID_MASK 0xff000000 #define SMU_LCLK_DPM_STATE_7_CNTL_0__STATE_VALID__SHIFT 0x18 #define SMU_LCLK_DPM_STATE_0_CNTL_1__MIN_VDDNB_MASK 0xffffffff #define SMU_LCLK_DPM_STATE_0_CNTL_1__MIN_VDDNB__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_1_CNTL_1__MIN_VDDNB_MASK 0xffffffff #define SMU_LCLK_DPM_STATE_1_CNTL_1__MIN_VDDNB__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_2_CNTL_1__MIN_VDDNB_MASK 0xffffffff #define SMU_LCLK_DPM_STATE_2_CNTL_1__MIN_VDDNB__SHIFT 0x0 
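/*
 * Illustrative usage sketch (not part of the original header): every _MASK /
 * __SHIFT pair above describes one packed field inside a 32-bit SMU word, and
 * a field is conventionally read as (reg & MASK) >> SHIFT.  The helper name
 * smu_get_field() and the decode function below are hypothetical examples that
 * only assume the SMU_LCLK_DPM_STATE_0_CNTL_0 definitions already given above.
 */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: extract one field from a raw 32-bit register word. */
static inline uint32_t smu_get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static void decode_lclk_dpm_state0_cntl0(uint32_t reg)
{
	/* Field layout taken from the SMU_LCLK_DPM_STATE_0_CNTL_0 defines above. */
	uint32_t threshold = smu_get_field(reg,
		SMU_LCLK_DPM_STATE_0_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD_MASK,
		SMU_LCLK_DPM_STATE_0_CNTL_0__LOW_VOLTAGE_REQ_THRESHOLD__SHIFT);
	uint32_t vid = smu_get_field(reg,
		SMU_LCLK_DPM_STATE_0_CNTL_0__VID_MASK,
		SMU_LCLK_DPM_STATE_0_CNTL_0__VID__SHIFT);
	uint32_t divider = smu_get_field(reg,
		SMU_LCLK_DPM_STATE_0_CNTL_0__CLK_DIVIDER_MASK,
		SMU_LCLK_DPM_STATE_0_CNTL_0__CLK_DIVIDER__SHIFT);
	uint32_t valid = smu_get_field(reg,
		SMU_LCLK_DPM_STATE_0_CNTL_0__STATE_VALID_MASK,
		SMU_LCLK_DPM_STATE_0_CNTL_0__STATE_VALID__SHIFT);

	printf("threshold=%u vid=%u divider=%u valid=%u\n",
	       (unsigned)threshold, (unsigned)vid,
	       (unsigned)divider, (unsigned)valid);
}
/* End of illustrative sketch; the original register definitions continue. */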
#define SMU_LCLK_DPM_STATE_3_CNTL_1__MIN_VDDNB_MASK 0xffffffff #define SMU_LCLK_DPM_STATE_3_CNTL_1__MIN_VDDNB__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_4_CNTL_1__MIN_VDDNB_MASK 0xffffffff #define SMU_LCLK_DPM_STATE_4_CNTL_1__MIN_VDDNB__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_5_CNTL_1__MIN_VDDNB_MASK 0xffffffff #define SMU_LCLK_DPM_STATE_5_CNTL_1__MIN_VDDNB__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_6_CNTL_1__MIN_VDDNB_MASK 0xffffffff #define SMU_LCLK_DPM_STATE_6_CNTL_1__MIN_VDDNB__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_7_CNTL_1__MIN_VDDNB_MASK 0xffffffff #define SMU_LCLK_DPM_STATE_7_CNTL_1__MIN_VDDNB__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_0_CNTL_2__HYSTERESIS_DOWN_MASK 0xff #define SMU_LCLK_DPM_STATE_0_CNTL_2__HYSTERESIS_DOWN__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_0_CNTL_2__HYSTERESIS_UP_MASK 0xff00 #define SMU_LCLK_DPM_STATE_0_CNTL_2__HYSTERESIS_UP__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_0_CNTL_2__RESIDENCY_COUNTER_MASK 0xffff0000 #define SMU_LCLK_DPM_STATE_0_CNTL_2__RESIDENCY_COUNTER__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_1_CNTL_2__HYSTERESIS_DOWN_MASK 0xff #define SMU_LCLK_DPM_STATE_1_CNTL_2__HYSTERESIS_DOWN__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_1_CNTL_2__HYSTERESIS_UP_MASK 0xff00 #define SMU_LCLK_DPM_STATE_1_CNTL_2__HYSTERESIS_UP__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_1_CNTL_2__RESIDENCY_COUNTER_MASK 0xffff0000 #define SMU_LCLK_DPM_STATE_1_CNTL_2__RESIDENCY_COUNTER__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_2_CNTL_2__HYSTERESIS_DOWN_MASK 0xff #define SMU_LCLK_DPM_STATE_2_CNTL_2__HYSTERESIS_DOWN__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_2_CNTL_2__HYSTERESIS_UP_MASK 0xff00 #define SMU_LCLK_DPM_STATE_2_CNTL_2__HYSTERESIS_UP__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_2_CNTL_2__RESIDENCY_COUNTER_MASK 0xffff0000 #define SMU_LCLK_DPM_STATE_2_CNTL_2__RESIDENCY_COUNTER__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_3_CNTL_2__HYSTERESIS_DOWN_MASK 0xff #define SMU_LCLK_DPM_STATE_3_CNTL_2__HYSTERESIS_DOWN__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_3_CNTL_2__HYSTERESIS_UP_MASK 0xff00 #define SMU_LCLK_DPM_STATE_3_CNTL_2__HYSTERESIS_UP__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_3_CNTL_2__RESIDENCY_COUNTER_MASK 0xffff0000 #define SMU_LCLK_DPM_STATE_3_CNTL_2__RESIDENCY_COUNTER__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_4_CNTL_2__HYSTERESIS_DOWN_MASK 0xff #define SMU_LCLK_DPM_STATE_4_CNTL_2__HYSTERESIS_DOWN__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_4_CNTL_2__HYSTERESIS_UP_MASK 0xff00 #define SMU_LCLK_DPM_STATE_4_CNTL_2__HYSTERESIS_UP__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_4_CNTL_2__RESIDENCY_COUNTER_MASK 0xffff0000 #define SMU_LCLK_DPM_STATE_4_CNTL_2__RESIDENCY_COUNTER__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_5_CNTL_2__HYSTERESIS_DOWN_MASK 0xff #define SMU_LCLK_DPM_STATE_5_CNTL_2__HYSTERESIS_DOWN__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_5_CNTL_2__HYSTERESIS_UP_MASK 0xff00 #define SMU_LCLK_DPM_STATE_5_CNTL_2__HYSTERESIS_UP__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_5_CNTL_2__RESIDENCY_COUNTER_MASK 0xffff0000 #define SMU_LCLK_DPM_STATE_5_CNTL_2__RESIDENCY_COUNTER__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_6_CNTL_2__HYSTERESIS_DOWN_MASK 0xff #define SMU_LCLK_DPM_STATE_6_CNTL_2__HYSTERESIS_DOWN__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_6_CNTL_2__HYSTERESIS_UP_MASK 0xff00 #define SMU_LCLK_DPM_STATE_6_CNTL_2__HYSTERESIS_UP__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_6_CNTL_2__RESIDENCY_COUNTER_MASK 0xffff0000 #define SMU_LCLK_DPM_STATE_6_CNTL_2__RESIDENCY_COUNTER__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_7_CNTL_2__HYSTERESIS_DOWN_MASK 0xff #define SMU_LCLK_DPM_STATE_7_CNTL_2__HYSTERESIS_DOWN__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_7_CNTL_2__HYSTERESIS_UP_MASK 0xff00 #define 
SMU_LCLK_DPM_STATE_7_CNTL_2__HYSTERESIS_UP__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_7_CNTL_2__RESIDENCY_COUNTER_MASK 0xffff0000 #define SMU_LCLK_DPM_STATE_7_CNTL_2__RESIDENCY_COUNTER__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_0_CNTL_3__LCLK_FREQUENCY_MASK 0xffffffff #define SMU_LCLK_DPM_STATE_0_CNTL_3__LCLK_FREQUENCY__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_1_CNTL_3__LCLK_FREQUENCY_MASK 0xffffffff #define SMU_LCLK_DPM_STATE_1_CNTL_3__LCLK_FREQUENCY__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_2_CNTL_3__LCLK_FREQUENCY_MASK 0xffffffff #define SMU_LCLK_DPM_STATE_2_CNTL_3__LCLK_FREQUENCY__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_3_CNTL_3__LCLK_FREQUENCY_MASK 0xffffffff #define SMU_LCLK_DPM_STATE_3_CNTL_3__LCLK_FREQUENCY__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_4_CNTL_3__LCLK_FREQUENCY_MASK 0xffffffff #define SMU_LCLK_DPM_STATE_4_CNTL_3__LCLK_FREQUENCY__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_5_CNTL_3__LCLK_FREQUENCY_MASK 0xffffffff #define SMU_LCLK_DPM_STATE_5_CNTL_3__LCLK_FREQUENCY__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_6_CNTL_3__LCLK_FREQUENCY_MASK 0xffffffff #define SMU_LCLK_DPM_STATE_6_CNTL_3__LCLK_FREQUENCY__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_7_CNTL_3__LCLK_FREQUENCY_MASK 0xffffffff #define SMU_LCLK_DPM_STATE_7_CNTL_3__LCLK_FREQUENCY__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_0_ACTIVITY_THRESHOLD__RESERVED_MASK 0xff #define SMU_LCLK_DPM_STATE_0_ACTIVITY_THRESHOLD__RESERVED__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_0_ACTIVITY_THRESHOLD__LCLKBYPASSCNTL_MASK 0xff00 #define SMU_LCLK_DPM_STATE_0_ACTIVITY_THRESHOLD__LCLKBYPASSCNTL__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_0_ACTIVITY_THRESHOLD__ENABLED_FOR_THROTTLE_MASK 0xff0000 #define SMU_LCLK_DPM_STATE_0_ACTIVITY_THRESHOLD__ENABLED_FOR_THROTTLE__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_0_ACTIVITY_THRESHOLD__ACTIVITY_THRESHOLD_MASK 0xff000000 #define SMU_LCLK_DPM_STATE_0_ACTIVITY_THRESHOLD__ACTIVITY_THRESHOLD__SHIFT 0x18 #define SMU_LCLK_DPM_STATE_1_ACTIVITY_THRESHOLD__RESERVED_MASK 0xff #define SMU_LCLK_DPM_STATE_1_ACTIVITY_THRESHOLD__RESERVED__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_1_ACTIVITY_THRESHOLD__LCLKBYPASSCNTL_MASK 0xff00 #define SMU_LCLK_DPM_STATE_1_ACTIVITY_THRESHOLD__LCLKBYPASSCNTL__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_1_ACTIVITY_THRESHOLD__ENABLED_FOR_THROTTLE_MASK 0xff0000 #define SMU_LCLK_DPM_STATE_1_ACTIVITY_THRESHOLD__ENABLED_FOR_THROTTLE__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_1_ACTIVITY_THRESHOLD__ACTIVITY_THRESHOLD_MASK 0xff000000 #define SMU_LCLK_DPM_STATE_1_ACTIVITY_THRESHOLD__ACTIVITY_THRESHOLD__SHIFT 0x18 #define SMU_LCLK_DPM_STATE_2_ACTIVITY_THRESHOLD__RESERVED_MASK 0xff #define SMU_LCLK_DPM_STATE_2_ACTIVITY_THRESHOLD__RESERVED__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_2_ACTIVITY_THRESHOLD__LCLKBYPASSCNTL_MASK 0xff00 #define SMU_LCLK_DPM_STATE_2_ACTIVITY_THRESHOLD__LCLKBYPASSCNTL__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_2_ACTIVITY_THRESHOLD__ENABLED_FOR_THROTTLE_MASK 0xff0000 #define SMU_LCLK_DPM_STATE_2_ACTIVITY_THRESHOLD__ENABLED_FOR_THROTTLE__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_2_ACTIVITY_THRESHOLD__ACTIVITY_THRESHOLD_MASK 0xff000000 #define SMU_LCLK_DPM_STATE_2_ACTIVITY_THRESHOLD__ACTIVITY_THRESHOLD__SHIFT 0x18 #define SMU_LCLK_DPM_STATE_3_ACTIVITY_THRESHOLD__RESERVED_MASK 0xff #define SMU_LCLK_DPM_STATE_3_ACTIVITY_THRESHOLD__RESERVED__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_3_ACTIVITY_THRESHOLD__LCLKBYPASSCNTL_MASK 0xff00 #define SMU_LCLK_DPM_STATE_3_ACTIVITY_THRESHOLD__LCLKBYPASSCNTL__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_3_ACTIVITY_THRESHOLD__ENABLED_FOR_THROTTLE_MASK 0xff0000 #define SMU_LCLK_DPM_STATE_3_ACTIVITY_THRESHOLD__ENABLED_FOR_THROTTLE__SHIFT 0x10 
#define SMU_LCLK_DPM_STATE_3_ACTIVITY_THRESHOLD__ACTIVITY_THRESHOLD_MASK 0xff000000 #define SMU_LCLK_DPM_STATE_3_ACTIVITY_THRESHOLD__ACTIVITY_THRESHOLD__SHIFT 0x18 #define SMU_LCLK_DPM_STATE_4_ACTIVITY_THRESHOLD__RESERVED_MASK 0xff #define SMU_LCLK_DPM_STATE_4_ACTIVITY_THRESHOLD__RESERVED__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_4_ACTIVITY_THRESHOLD__LCLKBYPASSCNTL_MASK 0xff00 #define SMU_LCLK_DPM_STATE_4_ACTIVITY_THRESHOLD__LCLKBYPASSCNTL__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_4_ACTIVITY_THRESHOLD__ENABLED_FOR_THROTTLE_MASK 0xff0000 #define SMU_LCLK_DPM_STATE_4_ACTIVITY_THRESHOLD__ENABLED_FOR_THROTTLE__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_4_ACTIVITY_THRESHOLD__ACTIVITY_THRESHOLD_MASK 0xff000000 #define SMU_LCLK_DPM_STATE_4_ACTIVITY_THRESHOLD__ACTIVITY_THRESHOLD__SHIFT 0x18 #define SMU_LCLK_DPM_STATE_5_ACTIVITY_THRESHOLD__RESERVED_MASK 0xff #define SMU_LCLK_DPM_STATE_5_ACTIVITY_THRESHOLD__RESERVED__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_5_ACTIVITY_THRESHOLD__LCLKBYPASSCNTL_MASK 0xff00 #define SMU_LCLK_DPM_STATE_5_ACTIVITY_THRESHOLD__LCLKBYPASSCNTL__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_5_ACTIVITY_THRESHOLD__ENABLED_FOR_THROTTLE_MASK 0xff0000 #define SMU_LCLK_DPM_STATE_5_ACTIVITY_THRESHOLD__ENABLED_FOR_THROTTLE__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_5_ACTIVITY_THRESHOLD__ACTIVITY_THRESHOLD_MASK 0xff000000 #define SMU_LCLK_DPM_STATE_5_ACTIVITY_THRESHOLD__ACTIVITY_THRESHOLD__SHIFT 0x18 #define SMU_LCLK_DPM_STATE_6_ACTIVITY_THRESHOLD__RESERVED_MASK 0xff #define SMU_LCLK_DPM_STATE_6_ACTIVITY_THRESHOLD__RESERVED__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_6_ACTIVITY_THRESHOLD__LCLKBYPASSCNTL_MASK 0xff00 #define SMU_LCLK_DPM_STATE_6_ACTIVITY_THRESHOLD__LCLKBYPASSCNTL__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_6_ACTIVITY_THRESHOLD__ENABLED_FOR_THROTTLE_MASK 0xff0000 #define SMU_LCLK_DPM_STATE_6_ACTIVITY_THRESHOLD__ENABLED_FOR_THROTTLE__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_6_ACTIVITY_THRESHOLD__ACTIVITY_THRESHOLD_MASK 0xff000000 #define SMU_LCLK_DPM_STATE_6_ACTIVITY_THRESHOLD__ACTIVITY_THRESHOLD__SHIFT 0x18 #define SMU_LCLK_DPM_STATE_7_ACTIVITY_THRESHOLD__RESERVED_MASK 0xff #define SMU_LCLK_DPM_STATE_7_ACTIVITY_THRESHOLD__RESERVED__SHIFT 0x0 #define SMU_LCLK_DPM_STATE_7_ACTIVITY_THRESHOLD__LCLKBYPASSCNTL_MASK 0xff00 #define SMU_LCLK_DPM_STATE_7_ACTIVITY_THRESHOLD__LCLKBYPASSCNTL__SHIFT 0x8 #define SMU_LCLK_DPM_STATE_7_ACTIVITY_THRESHOLD__ENABLED_FOR_THROTTLE_MASK 0xff0000 #define SMU_LCLK_DPM_STATE_7_ACTIVITY_THRESHOLD__ENABLED_FOR_THROTTLE__SHIFT 0x10 #define SMU_LCLK_DPM_STATE_7_ACTIVITY_THRESHOLD__ACTIVITY_THRESHOLD_MASK 0xff000000 #define SMU_LCLK_DPM_STATE_7_ACTIVITY_THRESHOLD__ACTIVITY_THRESHOLD__SHIFT 0x18 #define GIO_PID_CONTROLLER_CNTL_0__K_I_MASK 0xffffffff #define GIO_PID_CONTROLLER_CNTL_0__K_I__SHIFT 0x0 #define GIO_PID_CONTROLLER_CNTL_1__LF_WINDUP_UPPER_LIM_MASK 0xffffffff #define GIO_PID_CONTROLLER_CNTL_1__LF_WINDUP_UPPER_LIM__SHIFT 0x0 #define GIO_PID_CONTROLLER_CNTL_2__LF_WINDUP_LOWER_LIM_MASK 0xffffffff #define GIO_PID_CONTROLLER_CNTL_2__LF_WINDUP_LOWER_LIM__SHIFT 0x0 #define GIO_PID_CONTROLLER_CNTL_3__STATE_PRECISION_MASK 0xffffffff #define GIO_PID_CONTROLLER_CNTL_3__STATE_PRECISION__SHIFT 0x0 #define GIO_PID_CONTROLLER_CNTL_4__LF_PRECISION_MASK 0xffffffff #define GIO_PID_CONTROLLER_CNTL_4__LF_PRECISION__SHIFT 0x0 #define GIO_PID_CONTROLLER_CNTL_5__LF_OFFSET_MASK 0xffffffff #define GIO_PID_CONTROLLER_CNTL_5__LF_OFFSET__SHIFT 0x0 #define GIO_PID_CONTROLLER_CNTL_6__MAX_STATE_MASK 0xffffffff #define GIO_PID_CONTROLLER_CNTL_6__MAX_STATE__SHIFT 0x0 #define 
GIO_PID_CONTROLLER_CNTL_7__MAX_LF_FRACTION_MASK 0xffffffff #define GIO_PID_CONTROLLER_CNTL_7__MAX_LF_FRACTION__SHIFT 0x0 #define GIO_PID_CONTROLLER_CNTL_8__STATE_SHIFT_MASK 0xffffffff #define GIO_PID_CONTROLLER_CNTL_8__STATE_SHIFT__SHIFT 0x0 #define SMU_LCLK_DPM_LEVEL_COUNT__LCLK_DPM_LEVEL_COUNT_MASK 0xffffffff #define SMU_LCLK_DPM_LEVEL_COUNT__LCLK_DPM_LEVEL_COUNT__SHIFT 0x0 #define SMU_LCLK_DPM_CNTL__RESERVED_MASK 0xff #define SMU_LCLK_DPM_CNTL__RESERVED__SHIFT 0x0 #define SMU_LCLK_DPM_CNTL__LCLK_DPM_BOOT_STATE_MASK 0xff00 #define SMU_LCLK_DPM_CNTL__LCLK_DPM_BOOT_STATE__SHIFT 0x8 #define SMU_LCLK_DPM_CNTL__VOLTAGE_CHG_EN_MASK 0xff0000 #define SMU_LCLK_DPM_CNTL__VOLTAGE_CHG_EN__SHIFT 0x10 #define SMU_LCLK_DPM_CNTL__LCLK_DPM_EN_MASK 0xff000000 #define SMU_LCLK_DPM_CNTL__LCLK_DPM_EN__SHIFT 0x18 #define SMU_LCLK_DPM_CURRENT_AND_TARGET_STATE__CURRENT_STATE_MASK 0xff #define SMU_LCLK_DPM_CURRENT_AND_TARGET_STATE__CURRENT_STATE__SHIFT 0x0 #define SMU_LCLK_DPM_CURRENT_AND_TARGET_STATE__TARGET_STATE_MASK 0xff00 #define SMU_LCLK_DPM_CURRENT_AND_TARGET_STATE__TARGET_STATE__SHIFT 0x8 #define SMU_LCLK_DPM_THERMAL_THROTTLING_CNTL__LCLK_THERMAL_THROTTLING_EN_MASK 0xff #define SMU_LCLK_DPM_THERMAL_THROTTLING_CNTL__LCLK_THERMAL_THROTTLING_EN__SHIFT 0x0 #define SMU_LCLK_DPM_THERMAL_THROTTLING_CNTL__TEMPERATURE_SEL_MASK 0xff00 #define SMU_LCLK_DPM_THERMAL_THROTTLING_CNTL__TEMPERATURE_SEL__SHIFT 0x8 #define SMU_LCLK_DPM_THERMAL_THROTTLING_CNTL__LCLK_TT_MODE_MASK 0xff0000 #define SMU_LCLK_DPM_THERMAL_THROTTLING_CNTL__LCLK_TT_MODE__SHIFT 0x10 #define SMU_LCLK_DPM_THERMAL_THROTTLING_CNTL__TT_HTC_ACTIVE_MASK 0xff000000 #define SMU_LCLK_DPM_THERMAL_THROTTLING_CNTL__TT_HTC_ACTIVE__SHIFT 0x18 #define SMU_LCLK_DPM_THERMAL_THROTTLING_THRESHOLDS__LOW_THRESHOLD_MASK 0xffff #define SMU_LCLK_DPM_THERMAL_THROTTLING_THRESHOLDS__LOW_THRESHOLD__SHIFT 0x0 #define SMU_LCLK_DPM_THERMAL_THROTTLING_THRESHOLDS__HIGH_THRESHOLD_MASK 0xffff0000 #define SMU_LCLK_DPM_THERMAL_THROTTLING_THRESHOLDS__HIGH_THRESHOLD__SHIFT 0x10 #define PM_FUSES_1__BapmPstateVid_3_MASK 0xff #define PM_FUSES_1__BapmPstateVid_3__SHIFT 0x0 #define PM_FUSES_1__BapmPstateVid_2_MASK 0xff00 #define PM_FUSES_1__BapmPstateVid_2__SHIFT 0x8 #define PM_FUSES_1__BapmPstateVid_1_MASK 0xff0000 #define PM_FUSES_1__BapmPstateVid_1__SHIFT 0x10 #define PM_FUSES_1__BapmPstateVid_0_MASK 0xff000000 #define PM_FUSES_1__BapmPstateVid_0__SHIFT 0x18 #define PM_FUSES_2__BapmPstateVid_7_MASK 0xff #define PM_FUSES_2__BapmPstateVid_7__SHIFT 0x0 #define PM_FUSES_2__BapmPstateVid_6_MASK 0xff00 #define PM_FUSES_2__BapmPstateVid_6__SHIFT 0x8 #define PM_FUSES_2__BapmPstateVid_5_MASK 0xff0000 #define PM_FUSES_2__BapmPstateVid_5__SHIFT 0x10 #define PM_FUSES_2__BapmPstateVid_4_MASK 0xff000000 #define PM_FUSES_2__BapmPstateVid_4__SHIFT 0x18 #define PM_FUSES_3__BapmVddNbVidHiSidd_3_MASK 0xff #define PM_FUSES_3__BapmVddNbVidHiSidd_3__SHIFT 0x0 #define PM_FUSES_3__BapmVddNbVidHiSidd_2_MASK 0xff00 #define PM_FUSES_3__BapmVddNbVidHiSidd_2__SHIFT 0x8 #define PM_FUSES_3__BapmVddNbVidHiSidd_1_MASK 0xff0000 #define PM_FUSES_3__BapmVddNbVidHiSidd_1__SHIFT 0x10 #define PM_FUSES_3__BapmVddNbVidHiSidd_0_MASK 0xff000000 #define PM_FUSES_3__BapmVddNbVidHiSidd_0__SHIFT 0x18 #define PM_FUSES_4__BapmVddNbVidLoSidd_2_MASK 0xff #define PM_FUSES_4__BapmVddNbVidLoSidd_2__SHIFT 0x0 #define PM_FUSES_4__BapmVddNbVidLoSidd_1_MASK 0xff00 #define PM_FUSES_4__BapmVddNbVidLoSidd_1__SHIFT 0x8 #define PM_FUSES_4__BapmVddNbVidLoSidd_0_MASK 0xff0000 #define PM_FUSES_4__BapmVddNbVidLoSidd_0__SHIFT 0x10 #define 
PM_FUSES_4__BapmVddNbVidHiSidd_4_MASK 0xff000000 #define PM_FUSES_4__BapmVddNbVidHiSidd_4__SHIFT 0x18 #define PM_FUSES_5__CpuIdModel_MASK 0xff #define PM_FUSES_5__CpuIdModel__SHIFT 0x0 #define PM_FUSES_5__SviLoadLineEn_MASK 0xff00 #define PM_FUSES_5__SviLoadLineEn__SHIFT 0x8 #define PM_FUSES_5__BapmVddNbVidLoSidd_4_MASK 0xff0000 #define PM_FUSES_5__BapmVddNbVidLoSidd_4__SHIFT 0x10 #define PM_FUSES_5__BapmVddNbVidLoSidd_3_MASK 0xff000000 #define PM_FUSES_5__BapmVddNbVidLoSidd_3__SHIFT 0x18 #define PM_FUSES_6__SviLoadLineTrimVddNb_MASK 0xff #define PM_FUSES_6__SviLoadLineTrimVddNb__SHIFT 0x0 #define PM_FUSES_6__SviLoadLineTrimVdd_MASK 0xff00 #define PM_FUSES_6__SviLoadLineTrimVdd__SHIFT 0x8 #define PM_FUSES_6__SviLoadLineVddNb_MASK 0xff0000 #define PM_FUSES_6__SviLoadLineVddNb__SHIFT 0x10 #define PM_FUSES_6__SviLoadLineVdd_MASK 0xff000000 #define PM_FUSES_6__SviLoadLineVdd__SHIFT 0x18 #define PM_FUSES_7__BAPMTI_TjOffset_0_MASK 0xffff #define PM_FUSES_7__BAPMTI_TjOffset_0__SHIFT 0x0 #define PM_FUSES_7__SviLoadLineOffsetVddNb_MASK 0xff0000 #define PM_FUSES_7__SviLoadLineOffsetVddNb__SHIFT 0x10 #define PM_FUSES_7__SviLoadLineOffsetVdd_MASK 0xff000000 #define PM_FUSES_7__SviLoadLineOffsetVdd__SHIFT 0x18 #define PM_FUSES_8__BAPMTI_TjOffset_2_MASK 0xffff #define PM_FUSES_8__BAPMTI_TjOffset_2__SHIFT 0x0 #define PM_FUSES_8__BAPMTI_TjOffset_1_MASK 0xffff0000 #define PM_FUSES_8__BAPMTI_TjOffset_1__SHIFT 0x10 #define PM_FUSES_9__BAPMTI_TjHyst_1_MASK 0xffff #define PM_FUSES_9__BAPMTI_TjHyst_1__SHIFT 0x0 #define PM_FUSES_9__BAPMTI_TjHyst_0_MASK 0xffff0000 #define PM_FUSES_9__BAPMTI_TjHyst_0__SHIFT 0x10 #define PM_FUSES_10__BAPMTI_TjMax_1_MASK 0xff #define PM_FUSES_10__BAPMTI_TjMax_1__SHIFT 0x0 #define PM_FUSES_10__BAPMTI_TjMax_0_MASK 0xff00 #define PM_FUSES_10__BAPMTI_TjMax_0__SHIFT 0x8 #define PM_FUSES_10__BAPMTI_GpuTjHyst_MASK 0xffff0000 #define PM_FUSES_10__BAPMTI_GpuTjHyst__SHIFT 0x10 #define PM_FUSES_11__LhtcTmpLmt_MASK 0xff #define PM_FUSES_11__LhtcTmpLmt__SHIFT 0x0 #define PM_FUSES_11__LhtcPstateLimit_MASK 0xff00 #define PM_FUSES_11__LhtcPstateLimit__SHIFT 0x8 #define PM_FUSES_11__LhtcHystLmt_MASK 0xff0000 #define PM_FUSES_11__LhtcHystLmt__SHIFT 0x10 #define PM_FUSES_11__BAPMTI_GpuTjMax_MASK 0xff000000 #define PM_FUSES_11__BAPMTI_GpuTjMax__SHIFT 0x18 #define PM_FUSES_12__MaxPwrCpu_1_MASK 0xff #define PM_FUSES_12__MaxPwrCpu_1__SHIFT 0x0 #define PM_FUSES_12__MaxPwrCpu_0_MASK 0xff00 #define PM_FUSES_12__MaxPwrCpu_0__SHIFT 0x8 #define PM_FUSES_12__NomPwrCpu_1_MASK 0xff0000 #define PM_FUSES_12__NomPwrCpu_1__SHIFT 0x10 #define PM_FUSES_12__NomPwrCpu_0_MASK 0xff000000 #define PM_FUSES_12__NomPwrCpu_0__SHIFT 0x18 #define PM_FUSES_13__NomPwrGpu_MASK 0xffff #define PM_FUSES_13__NomPwrGpu__SHIFT 0x0 #define PM_FUSES_13__MidPwrCpu_1_MASK 0xff0000 #define PM_FUSES_13__MidPwrCpu_1__SHIFT 0x10 #define PM_FUSES_13__MidPwrCpu_0_MASK 0xff000000 #define PM_FUSES_13__MidPwrCpu_0__SHIFT 0x18 #define PM_FUSES_14__MinPwrGpu_MASK 0xffff #define PM_FUSES_14__MinPwrGpu__SHIFT 0x0 #define PM_FUSES_14__MaxPwrGpu_MASK 0xffff0000 #define PM_FUSES_14__MaxPwrGpu__SHIFT 0x10 #define PM_FUSES_15__PCIe3PhyOffset_MASK 0xff #define PM_FUSES_15__PCIe3PhyOffset__SHIFT 0x0 #define PM_FUSES_15__PCIe2PhyOffset_MASK 0xff00 #define PM_FUSES_15__PCIe2PhyOffset__SHIFT 0x8 #define PM_FUSES_15__PCIe1PhyOffset_MASK 0xff0000 #define PM_FUSES_15__PCIe1PhyOffset__SHIFT 0x10 #define PM_FUSES_15__MidPwrTempHyst_MASK 0xff000000 #define PM_FUSES_15__MidPwrTempHyst__SHIFT 0x18 #define PM_FUSES_16__TDC_VDD_PkgLimit_MASK 0xffff #define 
PM_FUSES_16__TDC_VDD_PkgLimit__SHIFT 0x0 #define PM_FUSES_16__DCE2PhyOffset_MASK 0xff0000 #define PM_FUSES_16__DCE2PhyOffset__SHIFT 0x10 #define PM_FUSES_16__DCE1PhyOffset_MASK 0xff000000 #define PM_FUSES_16__DCE1PhyOffset__SHIFT 0x18 #define PM_FUSES_17__TDC_VDDNB_ThrottleReleaseLimitPerc_MASK 0xff #define PM_FUSES_17__TDC_VDDNB_ThrottleReleaseLimitPerc__SHIFT 0x0 #define PM_FUSES_17__TDC_VDD_ThrottleReleaseLimitPerc_MASK 0xff00 #define PM_FUSES_17__TDC_VDD_ThrottleReleaseLimitPerc__SHIFT 0x8 #define PM_FUSES_17__TDC_VDDNB_PkgLimit_MASK 0xffff0000 #define PM_FUSES_17__TDC_VDDNB_PkgLimit__SHIFT 0x10 #define PM_FUSES_18__TdcWaterfallCtl_MASK 0xff #define PM_FUSES_18__TdcWaterfallCtl__SHIFT 0x0 #define PM_FUSES_18__TdpAgeRate_MASK 0xff00 #define PM_FUSES_18__TdpAgeRate__SHIFT 0x8 #define PM_FUSES_18__TdpAgeValue_MASK 0xff0000 #define PM_FUSES_18__TdpAgeValue__SHIFT 0x10 #define PM_FUSES_18__TDC_MAWt_MASK 0xff000000 #define PM_FUSES_18__TDC_MAWt__SHIFT 0x18 #define PM_FUSES_19__BapmLhtcCap_MASK 0xff #define PM_FUSES_19__BapmLhtcCap__SHIFT 0x0 #define PM_FUSES_19__BapmFuseOverride_MASK 0xff00 #define PM_FUSES_19__BapmFuseOverride__SHIFT 0x8 #define PM_FUSES_19__SmuCoolingIndex_MASK 0xff0000 #define PM_FUSES_19__SmuCoolingIndex__SHIFT 0x10 #define PM_FUSES_19__SmuSocIndex_MASK 0xff000000 #define PM_FUSES_19__SmuSocIndex__SHIFT 0x18 #define PM_FUSES_20__SamClkDid_3_MASK 0xff #define PM_FUSES_20__SamClkDid_3__SHIFT 0x0 #define PM_FUSES_20__SamClkDid_2_MASK 0xff00 #define PM_FUSES_20__SamClkDid_2__SHIFT 0x8 #define PM_FUSES_20__SamClkDid_1_MASK 0xff0000 #define PM_FUSES_20__SamClkDid_1__SHIFT 0x10 #define PM_FUSES_20__SamClkDid_0_MASK 0xff000000 #define PM_FUSES_20__SamClkDid_0__SHIFT 0x18 #define PM_FUSES_21__AmbientTempBase_MASK 0xff #define PM_FUSES_21__AmbientTempBase__SHIFT 0x0 #define PM_FUSES_21__LPMLTemperatureMax_MASK 0xff00 #define PM_FUSES_21__LPMLTemperatureMax__SHIFT 0x8 #define PM_FUSES_21__LPMLTemperatureMin_MASK 0xff0000 #define PM_FUSES_21__LPMLTemperatureMin__SHIFT 0x10 #define PM_FUSES_21__SamClkDid_4_MASK 0xff000000 #define PM_FUSES_21__SamClkDid_4__SHIFT 0x18 #define PM_FUSES_22__LPMLTemperatureScaler_3_MASK 0xff #define PM_FUSES_22__LPMLTemperatureScaler_3__SHIFT 0x0 #define PM_FUSES_22__LPMLTemperatureScaler_2_MASK 0xff00 #define PM_FUSES_22__LPMLTemperatureScaler_2__SHIFT 0x8 #define PM_FUSES_22__LPMLTemperatureScaler_1_MASK 0xff0000 #define PM_FUSES_22__LPMLTemperatureScaler_1__SHIFT 0x10 #define PM_FUSES_22__LPMLTemperatureScaler_0_MASK 0xff000000 #define PM_FUSES_22__LPMLTemperatureScaler_0__SHIFT 0x18 #define PM_FUSES_23__LPMLTemperatureScaler_7_MASK 0xff #define PM_FUSES_23__LPMLTemperatureScaler_7__SHIFT 0x0 #define PM_FUSES_23__LPMLTemperatureScaler_6_MASK 0xff00 #define PM_FUSES_23__LPMLTemperatureScaler_6__SHIFT 0x8 #define PM_FUSES_23__LPMLTemperatureScaler_5_MASK 0xff0000 #define PM_FUSES_23__LPMLTemperatureScaler_5__SHIFT 0x10 #define PM_FUSES_23__LPMLTemperatureScaler_4_MASK 0xff000000 #define PM_FUSES_23__LPMLTemperatureScaler_4__SHIFT 0x18 #define PM_FUSES_24__LPMLTemperatureScaler_11_MASK 0xff #define PM_FUSES_24__LPMLTemperatureScaler_11__SHIFT 0x0 #define PM_FUSES_24__LPMLTemperatureScaler_10_MASK 0xff00 #define PM_FUSES_24__LPMLTemperatureScaler_10__SHIFT 0x8 #define PM_FUSES_24__LPMLTemperatureScaler_9_MASK 0xff0000 #define PM_FUSES_24__LPMLTemperatureScaler_9__SHIFT 0x10 #define PM_FUSES_24__LPMLTemperatureScaler_8_MASK 0xff000000 #define PM_FUSES_24__LPMLTemperatureScaler_8__SHIFT 0x18 #define PM_FUSES_25__LPMLTemperatureScaler_15_MASK 0xff #define 
PM_FUSES_25__LPMLTemperatureScaler_15__SHIFT 0x0 #define PM_FUSES_25__LPMLTemperatureScaler_14_MASK 0xff00 #define PM_FUSES_25__LPMLTemperatureScaler_14__SHIFT 0x8 #define PM_FUSES_25__LPMLTemperatureScaler_13_MASK 0xff0000 #define PM_FUSES_25__LPMLTemperatureScaler_13__SHIFT 0x10 #define PM_FUSES_25__LPMLTemperatureScaler_12_MASK 0xff000000 #define PM_FUSES_25__LPMLTemperatureScaler_12__SHIFT 0x18 #define PM_FUSES_26__GnbLPML_3_MASK 0xff #define PM_FUSES_26__GnbLPML_3__SHIFT 0x0 #define PM_FUSES_26__GnbLPML_2_MASK 0xff00 #define PM_FUSES_26__GnbLPML_2__SHIFT 0x8 #define PM_FUSES_26__GnbLPML_1_MASK 0xff0000 #define PM_FUSES_26__GnbLPML_1__SHIFT 0x10 #define PM_FUSES_26__GnbLPML_0_MASK 0xff000000 #define PM_FUSES_26__GnbLPML_0__SHIFT 0x18 #define PM_FUSES_27__GnbLPML_7_MASK 0xff #define PM_FUSES_27__GnbLPML_7__SHIFT 0x0 #define PM_FUSES_27__GnbLPML_6_MASK 0xff00 #define PM_FUSES_27__GnbLPML_6__SHIFT 0x8 #define PM_FUSES_27__GnbLPML_5_MASK 0xff0000 #define PM_FUSES_27__GnbLPML_5__SHIFT 0x10 #define PM_FUSES_27__GnbLPML_4_MASK 0xff000000 #define PM_FUSES_27__GnbLPML_4__SHIFT 0x18 #define PM_FUSES_28__GnbLPML_11_MASK 0xff #define PM_FUSES_28__GnbLPML_11__SHIFT 0x0 #define PM_FUSES_28__GnbLPML_10_MASK 0xff00 #define PM_FUSES_28__GnbLPML_10__SHIFT 0x8 #define PM_FUSES_28__GnbLPML_9_MASK 0xff0000 #define PM_FUSES_28__GnbLPML_9__SHIFT 0x10 #define PM_FUSES_28__GnbLPML_8_MASK 0xff000000 #define PM_FUSES_28__GnbLPML_8__SHIFT 0x18 #define PM_FUSES_29__GnbLPML_15_MASK 0xff #define PM_FUSES_29__GnbLPML_15__SHIFT 0x0 #define PM_FUSES_29__GnbLPML_14_MASK 0xff00 #define PM_FUSES_29__GnbLPML_14__SHIFT 0x8 #define PM_FUSES_29__GnbLPML_13_MASK 0xff0000 #define PM_FUSES_29__GnbLPML_13__SHIFT 0x10 #define PM_FUSES_29__GnbLPML_12_MASK 0xff000000 #define PM_FUSES_29__GnbLPML_12__SHIFT 0x18 #define PM_FUSES_30__NbVid_3_MASK 0xff #define PM_FUSES_30__NbVid_3__SHIFT 0x0 #define PM_FUSES_30__NbVid_2_MASK 0xff00 #define PM_FUSES_30__NbVid_2__SHIFT 0x8 #define PM_FUSES_30__NbVid_1_MASK 0xff0000 #define PM_FUSES_30__NbVid_1__SHIFT 0x10 #define PM_FUSES_30__NbVid_0_MASK 0xff000000 #define PM_FUSES_30__NbVid_0__SHIFT 0x18 #define PM_FUSES_31__CpuVid_3_MASK 0xff #define PM_FUSES_31__CpuVid_3__SHIFT 0x0 #define PM_FUSES_31__CpuVid_2_MASK 0xff00 #define PM_FUSES_31__CpuVid_2__SHIFT 0x8 #define PM_FUSES_31__CpuVid_1_MASK 0xff0000 #define PM_FUSES_31__CpuVid_1__SHIFT 0x10 #define PM_FUSES_31__CpuVid_0_MASK 0xff000000 #define PM_FUSES_31__CpuVid_0__SHIFT 0x18 #define PM_FUSES_32__CpuVid_7_MASK 0xff #define PM_FUSES_32__CpuVid_7__SHIFT 0x0 #define PM_FUSES_32__CpuVid_6_MASK 0xff00 #define PM_FUSES_32__CpuVid_6__SHIFT 0x8 #define PM_FUSES_32__CpuVid_5_MASK 0xff0000 #define PM_FUSES_32__CpuVid_5__SHIFT 0x10 #define PM_FUSES_32__CpuVid_4_MASK 0xff000000 #define PM_FUSES_32__CpuVid_4__SHIFT 0x18 #define PM_FUSES_33__Tdp2Watt_MASK 0xffff #define PM_FUSES_33__Tdp2Watt__SHIFT 0x0 #define PM_FUSES_33__GnbLPMLMinVid_MASK 0xff0000 #define PM_FUSES_33__GnbLPMLMinVid__SHIFT 0x10 #define PM_FUSES_33__GnbLPMLMaxVid_MASK 0xff000000 #define PM_FUSES_33__GnbLPMLMaxVid__SHIFT 0x18 #define PM_FUSES_34__Lpml_3_MASK 0xff #define PM_FUSES_34__Lpml_3__SHIFT 0x0 #define PM_FUSES_34__Lpml_2_MASK 0xff00 #define PM_FUSES_34__Lpml_2__SHIFT 0x8 #define PM_FUSES_34__Lpml_1_MASK 0xff0000 #define PM_FUSES_34__Lpml_1__SHIFT 0x10 #define PM_FUSES_34__Lpml_0_MASK 0xff000000 #define PM_FUSES_34__Lpml_0__SHIFT 0x18 #define PM_FUSES_35__Lpml_7_MASK 0xff #define PM_FUSES_35__Lpml_7__SHIFT 0x0 #define PM_FUSES_35__Lpml_6_MASK 0xff00 #define PM_FUSES_35__Lpml_6__SHIFT 
0x8 #define PM_FUSES_35__Lpml_5_MASK 0xff0000 #define PM_FUSES_35__Lpml_5__SHIFT 0x10 #define PM_FUSES_35__Lpml_4_MASK 0xff000000 #define PM_FUSES_35__Lpml_4__SHIFT 0x18 #define PM_FUSES_36__Lpmv_3_MASK 0xff #define PM_FUSES_36__Lpmv_3__SHIFT 0x0 #define PM_FUSES_36__Lpmv_2_MASK 0xff00 #define PM_FUSES_36__Lpmv_2__SHIFT 0x8 #define PM_FUSES_36__Lpmv_1_MASK 0xff0000 #define PM_FUSES_36__Lpmv_1__SHIFT 0x10 #define PM_FUSES_36__Lpmv_0_MASK 0xff000000 #define PM_FUSES_36__Lpmv_0__SHIFT 0x18 #define PM_FUSES_37__Lpmv_7_MASK 0xff #define PM_FUSES_37__Lpmv_7__SHIFT 0x0 #define PM_FUSES_37__Lpmv_6_MASK 0xff00 #define PM_FUSES_37__Lpmv_6__SHIFT 0x8 #define PM_FUSES_37__Lpmv_5_MASK 0xff0000 #define PM_FUSES_37__Lpmv_5__SHIFT 0x10 #define PM_FUSES_37__Lpmv_4_MASK 0xff000000 #define PM_FUSES_37__Lpmv_4__SHIFT 0x18 #define PM_FUSES_38__EClkDid_3_MASK 0xff #define PM_FUSES_38__EClkDid_3__SHIFT 0x0 #define PM_FUSES_38__EClkDid_2_MASK 0xff00 #define PM_FUSES_38__EClkDid_2__SHIFT 0x8 #define PM_FUSES_38__EClkDid_1_MASK 0xff0000 #define PM_FUSES_38__EClkDid_1__SHIFT 0x10 #define PM_FUSES_38__EClkDid_0_MASK 0xff000000 #define PM_FUSES_38__EClkDid_0__SHIFT 0x18 #define PM_FUSES_39__CoreDis_MASK 0xff #define PM_FUSES_39__CoreDis__SHIFT 0x0 #define PM_FUSES_39__C6CstatePower_MASK 0xff00 #define PM_FUSES_39__C6CstatePower__SHIFT 0x8 #define PM_FUSES_39__BoostLock_MASK 0xff0000 #define PM_FUSES_39__BoostLock__SHIFT 0x10 #define PM_FUSES_39__EClkDid_4_MASK 0xff000000 #define PM_FUSES_39__EClkDid_4__SHIFT 0x18 #define PM_FUSES_40__BapmVddNbBaseLeakageLoSidd_MASK 0xffff #define PM_FUSES_40__BapmVddNbBaseLeakageLoSidd__SHIFT 0x0 #define PM_FUSES_40__BapmVddNbBaseLeakageHiSidd_MASK 0xffff0000 #define PM_FUSES_40__BapmVddNbBaseLeakageHiSidd__SHIFT 0x10 #define PM_FUSES_41__VddNbVid_3_MASK 0xff #define PM_FUSES_41__VddNbVid_3__SHIFT 0x0 #define PM_FUSES_41__VddNbVid_2_MASK 0xff00 #define PM_FUSES_41__VddNbVid_2__SHIFT 0x8 #define PM_FUSES_41__VddNbVid_1_MASK 0xff0000 #define PM_FUSES_41__VddNbVid_1__SHIFT 0x10 #define PM_FUSES_41__VddNbVid_0_MASK 0xff000000 #define PM_FUSES_41__VddNbVid_0__SHIFT 0x18 #define PM_FUSES_42__VddNbVidOffset_2_MASK 0xff #define PM_FUSES_42__VddNbVidOffset_2__SHIFT 0x0 #define PM_FUSES_42__VddNbVidOffset_1_MASK 0xff00 #define PM_FUSES_42__VddNbVidOffset_1__SHIFT 0x8 #define PM_FUSES_42__VddNbVidOffset_0_MASK 0xff0000 #define PM_FUSES_42__VddNbVidOffset_0__SHIFT 0x10 #define PM_FUSES_42__VddNbVid_4_MASK 0xff000000 #define PM_FUSES_42__VddNbVid_4__SHIFT 0x18 #define PM_FUSES_43__BapmDisable_MASK 0xff #define PM_FUSES_43__BapmDisable__SHIFT 0x0 #define PM_FUSES_43__CoreTdpLimit0_MASK 0xff00 #define PM_FUSES_43__CoreTdpLimit0__SHIFT 0x8 #define PM_FUSES_43__VddNbVidOffset_4_MASK 0xff0000 #define PM_FUSES_43__VddNbVidOffset_4__SHIFT 0x10 #define PM_FUSES_43__VddNbVidOffset_3_MASK 0xff000000 #define PM_FUSES_43__VddNbVidOffset_3__SHIFT 0x18 #define PM_FUSES_44__LpmlL2_3_MASK 0xff #define PM_FUSES_44__LpmlL2_3__SHIFT 0x0 #define PM_FUSES_44__LpmlL2_2_MASK 0xff00 #define PM_FUSES_44__LpmlL2_2__SHIFT 0x8 #define PM_FUSES_44__LpmlL2_1_MASK 0xff0000 #define PM_FUSES_44__LpmlL2_1__SHIFT 0x10 #define PM_FUSES_44__LpmlL2_0_MASK 0xff000000 #define PM_FUSES_44__LpmlL2_0__SHIFT 0x18 #define PM_FUSES_45__LpmlL2_7_MASK 0xff #define PM_FUSES_45__LpmlL2_7__SHIFT 0x0 #define PM_FUSES_45__LpmlL2_6_MASK 0xff00 #define PM_FUSES_45__LpmlL2_6__SHIFT 0x8 #define PM_FUSES_45__LpmlL2_5_MASK 0xff0000 #define PM_FUSES_45__LpmlL2_5__SHIFT 0x10 #define PM_FUSES_45__LpmlL2_4_MASK 0xff000000 #define PM_FUSES_45__LpmlL2_4__SHIFT 
0x18 #define PM_FUSES_46__CoolPdmTc_MASK 0xff #define PM_FUSES_46__CoolPdmTc__SHIFT 0x0 #define PM_FUSES_46__BaseCpcTdpLimit2_MASK 0xff00 #define PM_FUSES_46__BaseCpcTdpLimit2__SHIFT 0x8 #define PM_FUSES_46__BaseCpcTdpLimit1_MASK 0xff0000 #define PM_FUSES_46__BaseCpcTdpLimit1__SHIFT 0x10 #define PM_FUSES_46__BaseCpcTdpLimit_MASK 0xff000000 #define PM_FUSES_46__BaseCpcTdpLimit__SHIFT 0x18 #define PM_FUSES_47__CoolPdmThr2_MASK 0xff #define PM_FUSES_47__CoolPdmThr2__SHIFT 0x0 #define PM_FUSES_47__CoolPdmThr1_MASK 0xff00 #define PM_FUSES_47__CoolPdmThr1__SHIFT 0x8 #define PM_FUSES_47__GpuPdmTc_MASK 0xff0000 #define PM_FUSES_47__GpuPdmTc__SHIFT 0x10 #define PM_FUSES_47__HeatPdmTc_MASK 0xff000000 #define PM_FUSES_47__HeatPdmTc__SHIFT 0x18 #define PM_FUSES_48__PkgPwr_MAWt_MASK 0xff #define PM_FUSES_48__PkgPwr_MAWt__SHIFT 0x0 #define PM_FUSES_48__GpuActThr_MASK 0xff00 #define PM_FUSES_48__GpuActThr__SHIFT 0x8 #define PM_FUSES_48__HeatPdmThr2_MASK 0xff0000 #define PM_FUSES_48__HeatPdmThr2__SHIFT 0x10 #define PM_FUSES_48__HeatPdmThr1_MASK 0xff000000 #define PM_FUSES_48__HeatPdmThr1__SHIFT 0x18 #define PM_FUSES_49__SocketTdp_MASK 0xffff #define PM_FUSES_49__SocketTdp__SHIFT 0x0 #define PM_FUSES_49__GpuPdmMult_MASK 0xffff0000 #define PM_FUSES_49__GpuPdmMult__SHIFT 0x10 #define PM_FUSES_50__Reserved2_MASK 0xffff #define PM_FUSES_50__Reserved2__SHIFT 0x0 #define PM_FUSES_50__Reserved1_MASK 0xff0000 #define PM_FUSES_50__Reserved1__SHIFT 0x10 #define PM_FUSES_50__NumBoostStates_MASK 0xff000000 #define PM_FUSES_50__NumBoostStates__SHIFT 0x18 #define PM_FUSES_51__FUSE_DATA_MASK 0xffffffff #define PM_FUSES_51__FUSE_DATA__SHIFT 0x0 #define PM_FUSES_52__FUSE_DATA_MASK 0xffffffff #define PM_FUSES_52__FUSE_DATA__SHIFT 0x0 #define PM_FUSES_53__FUSE_DATA_MASK 0xffffffff #define PM_FUSES_53__FUSE_DATA__SHIFT 0x0 #define PM_FUSES_54__FUSE_DATA_MASK 0xffffffff #define PM_FUSES_54__FUSE_DATA__SHIFT 0x0 #define PM_FUSES_55__FUSE_DATA_MASK 0xffffffff #define PM_FUSES_55__FUSE_DATA__SHIFT 0x0 #define PM_FUSES_56__FUSE_DATA_MASK 0xffffffff #define PM_FUSES_56__FUSE_DATA__SHIFT 0x0 #define PM_FUSES_57__FUSE_DATA_MASK 0xffffffff #define PM_FUSES_57__FUSE_DATA__SHIFT 0x0 #define PM_FUSES_58__FUSE_DATA_MASK 0xffffffff #define PM_FUSES_58__FUSE_DATA__SHIFT 0x0 #define PM_FUSES_59__FUSE_DATA_MASK 0xffffffff #define PM_FUSES_59__FUSE_DATA__SHIFT 0x0 #define PM_FUSES_60__FUSE_DATA_MASK 0xffffffff #define PM_FUSES_60__FUSE_DATA__SHIFT 0x0 #define PM_FUSES_61__FUSE_DATA_MASK 0xffffffff #define PM_FUSES_61__FUSE_DATA__SHIFT 0x0 #define PM_FUSES_62__FUSE_DATA_MASK 0xffffffff #define PM_FUSES_62__FUSE_DATA__SHIFT 0x0 #define PM_FUSES_63__FUSE_DATA_MASK 0xffffffff #define PM_FUSES_63__FUSE_DATA__SHIFT 0x0 #define PM_FUSES_64__FUSE_DATA_MASK 0xffffffff #define PM_FUSES_64__FUSE_DATA__SHIFT 0x0 #define PM_FUSES_65__FUSE_DATA_MASK 0xffffffff #define PM_FUSES_65__FUSE_DATA__SHIFT 0x0 #define FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x1 #define FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0 #define FIRMWARE_FLAGS__RESERVED_MASK 0xfffffe #define FIRMWARE_FLAGS__RESERVED__SHIFT 0x1 #define FIRMWARE_FLAGS__TEST_COUNT_MASK 0xff000000 #define FIRMWARE_FLAGS__TEST_COUNT__SHIFT 0x18 #define TEMPERATURE_READ_ADDR__CSR_ADDR_MASK 0x3f #define TEMPERATURE_READ_ADDR__CSR_ADDR__SHIFT 0x0 #define TEMPERATURE_READ_ADDR__TCEN_ID_MASK 0x3c0 #define TEMPERATURE_READ_ADDR__TCEN_ID__SHIFT 0x6 #define TEMPERATURE_READ_ADDR__RESERVED_MASK 0xfffffc00 #define TEMPERATURE_READ_ADDR__RESERVED__SHIFT 0xa #define CURRENT_GNB_TEMP__TEMP_MASK 0x7ff #define 
CURRENT_GNB_TEMP__TEMP__SHIFT 0x0 #define CURRENT_GLOBAL_TEMP__TEMP_MASK 0x7ff #define CURRENT_GLOBAL_TEMP__TEMP__SHIFT 0x0 #define FEATURE_STATUS__SCLK_DPM_ON_MASK 0x1 #define FEATURE_STATUS__SCLK_DPM_ON__SHIFT 0x0 #define FEATURE_STATUS__MCLK_DPM_ON_MASK 0x2 #define FEATURE_STATUS__MCLK_DPM_ON__SHIFT 0x1 #define FEATURE_STATUS__LCLK_DPM_ON_MASK 0x4 #define FEATURE_STATUS__LCLK_DPM_ON__SHIFT 0x2 #define FEATURE_STATUS__UVD_DPM_ON_MASK 0x8 #define FEATURE_STATUS__UVD_DPM_ON__SHIFT 0x3 #define FEATURE_STATUS__VCE_DPM_ON_MASK 0x10 #define FEATURE_STATUS__VCE_DPM_ON__SHIFT 0x4 #define FEATURE_STATUS__ACP_DPM_ON_MASK 0x20 #define FEATURE_STATUS__ACP_DPM_ON__SHIFT 0x5 #define FEATURE_STATUS__SAMU_DPM_ON_MASK 0x40 #define FEATURE_STATUS__SAMU_DPM_ON__SHIFT 0x6 #define FEATURE_STATUS__PCIE_DPM_ON_MASK 0x80 #define FEATURE_STATUS__PCIE_DPM_ON__SHIFT 0x7 #define FEATURE_STATUS__BAPM_ON_MASK 0x100 #define FEATURE_STATUS__BAPM_ON__SHIFT 0x8 #define FEATURE_STATUS__LPMX_ON_MASK 0x200 #define FEATURE_STATUS__LPMX_ON__SHIFT 0x9 #define FEATURE_STATUS__NBDPM_ON_MASK 0x400 #define FEATURE_STATUS__NBDPM_ON__SHIFT 0xa #define FEATURE_STATUS__LHTC_ON_MASK 0x800 #define FEATURE_STATUS__LHTC_ON__SHIFT 0xb #define FEATURE_STATUS__VPC_ON_MASK 0x1000 #define FEATURE_STATUS__VPC_ON__SHIFT 0xc #define FEATURE_STATUS__VOLTAGE_CONTROLLER_ON_MASK 0x2000 #define FEATURE_STATUS__VOLTAGE_CONTROLLER_ON__SHIFT 0xd #define FEATURE_STATUS__TDC_LIMIT_ON_MASK 0x4000 #define FEATURE_STATUS__TDC_LIMIT_ON__SHIFT 0xe #define FEATURE_STATUS__GPU_CAC_ON_MASK 0x8000 #define FEATURE_STATUS__GPU_CAC_ON__SHIFT 0xf #define FEATURE_STATUS__AVS_ON_MASK 0x10000 #define FEATURE_STATUS__AVS_ON__SHIFT 0x10 #define FEATURE_STATUS__SPMI_ON_MASK 0x20000 #define FEATURE_STATUS__SPMI_ON__SHIFT 0x11 #define FEATURE_STATUS__SCLK_DPM_FORCED_MASK 0x40000 #define FEATURE_STATUS__SCLK_DPM_FORCED__SHIFT 0x12 #define FEATURE_STATUS__MCLK_DPM_FORCED_MASK 0x80000 #define FEATURE_STATUS__MCLK_DPM_FORCED__SHIFT 0x13 #define FEATURE_STATUS__LCLK_DPM_FORCED_MASK 0x100000 #define FEATURE_STATUS__LCLK_DPM_FORCED__SHIFT 0x14 #define FEATURE_STATUS__PCIE_DPM_FORCED_MASK 0x200000 #define FEATURE_STATUS__PCIE_DPM_FORCED__SHIFT 0x15 #define FEATURE_STATUS__CLK_MON_ON_MASK 0x400000 #define FEATURE_STATUS__CLK_MON_ON__SHIFT 0x16 #define FEATURE_STATUS__RESERVED_MASK 0xff800000 #define FEATURE_STATUS__RESERVED__SHIFT 0x17 #define PCIE_PLL_RECONF__RECONF_WAIT_MASK 0xff #define PCIE_PLL_RECONF__RECONF_WAIT__SHIFT 0x0 #define PCIE_PLL_RECONF__RECONF_WRAPPER_MASK 0xff00 #define PCIE_PLL_RECONF__RECONF_WRAPPER__SHIFT 0x8 #define PCIE_PLL_RECONF__SB_RELOCATE_EN_MASK 0xff0000 #define PCIE_PLL_RECONF__SB_RELOCATE_EN__SHIFT 0x10 #define PCIE_PLL_RECONF__SB_NEW_PORT_MASK 0xff000000 #define PCIE_PLL_RECONF__SB_NEW_PORT__SHIFT 0x18 #define PM_INTERVAL_CNTL_0__LCLK_DPM_MASK 0xff #define PM_INTERVAL_CNTL_0__LCLK_DPM__SHIFT 0x0 #define PM_INTERVAL_CNTL_0__THERMAL_CNTL_MASK 0xff00 #define PM_INTERVAL_CNTL_0__THERMAL_CNTL__SHIFT 0x8 #define PM_INTERVAL_CNTL_0__VOLTAGE_CNTL_MASK 0xff0000 #define PM_INTERVAL_CNTL_0__VOLTAGE_CNTL__SHIFT 0x10 #define PM_INTERVAL_CNTL_0__LOADLINE_MASK 0xff000000 #define PM_INTERVAL_CNTL_0__LOADLINE__SHIFT 0x18 #define PM_INTERVAL_CNTL_1__NB_DPM_MASK 0xff #define PM_INTERVAL_CNTL_1__NB_DPM__SHIFT 0x0 #define PM_INTERVAL_CNTL_1__AVS_PERIOD_MASK 0xff00 #define PM_INTERVAL_CNTL_1__AVS_PERIOD__SHIFT 0x8 #define PM_INTERVAL_CNTL_1__PKGPWR_PERIOD_MASK 0xff0000 #define PM_INTERVAL_CNTL_1__PKGPWR_PERIOD__SHIFT 0x10 #define PM_INTERVAL_CNTL_1__TDP_CNTL_MASK 0xff000000 
#define PM_INTERVAL_CNTL_1__TDP_CNTL__SHIFT 0x18 #define PM_INTERVAL_CNTL_2__BAPM_PERIOD_MASK 0xff #define PM_INTERVAL_CNTL_2__BAPM_PERIOD__SHIFT 0x0 #define PM_INTERVAL_CNTL_2__HTC_PERIOD_MASK 0xff00 #define PM_INTERVAL_CNTL_2__HTC_PERIOD__SHIFT 0x8 #define PM_INTERVAL_CNTL_2__TDC_PERIOD_MASK 0xff0000 #define PM_INTERVAL_CNTL_2__TDC_PERIOD__SHIFT 0x10 #define PM_INTERVAL_CNTL_2__LPMX_PERIOD_MASK 0xff000000 #define PM_INTERVAL_CNTL_2__LPMX_PERIOD__SHIFT 0x18 #define VPC_INTERVAL_CNTL__VPC_PERIOD_MASK 0xffffffff #define VPC_INTERVAL_CNTL__VPC_PERIOD__SHIFT 0x0 #define DISP_PHY_TDP_LIMIT__DisplayPhyTdpLimit_MASK 0xffffffff #define DISP_PHY_TDP_LIMIT__DisplayPhyTdpLimit__SHIFT 0x0 #define FCH_PWR_CREDIT__FchPwrCredit_MASK 0xffffffff #define FCH_PWR_CREDIT__FchPwrCredit__SHIFT 0x0 #define PKGPWR_MV_AVG__Avg_Pkg_Pwr_MASK 0xffffffff #define PKGPWR_MV_AVG__Avg_Pkg_Pwr__SHIFT 0x0 #define PACKAGE_POWER__Pkg_power_MASK 0xffffffff #define PACKAGE_POWER__Pkg_power__SHIFT 0x0 #define PKG_PWR_CNTL__CpcGpuPerfPri_MASK 0x1 #define PKG_PWR_CNTL__CpcGpuPerfPri__SHIFT 0x0 #define PKG_PWR_CNTL__PkgPwrLimit_MASK 0x1fffe #define PKG_PWR_CNTL__PkgPwrLimit__SHIFT 0x1 #define PKG_PWR_CNTL__FchPwrCreditScale_MASK 0x7e0000 #define PKG_PWR_CNTL__FchPwrCreditScale__SHIFT 0x11 #define PKG_PWR_CNTL__PkgHystCoeff_MASK 0x1f800000 #define PKG_PWR_CNTL__PkgHystCoeff__SHIFT 0x17 #define PKG_PWR_CNTL__RESERVED_MASK 0xe0000000 #define PKG_PWR_CNTL__RESERVED__SHIFT 0x1d #define PKG_PWR_STATUS__GnbMinLimitSetFlag_MASK 0x1 #define PKG_PWR_STATUS__GnbMinLimitSetFlag__SHIFT 0x0 #define PKG_PWR_STATUS__PstateLimitSetFlag_MASK 0x2 #define PKG_PWR_STATUS__PstateLimitSetFlag__SHIFT 0x1 #define PKG_PWR_STATUS__PkgPwrLimit_base_MASK 0x3fffc #define PKG_PWR_STATUS__PkgPwrLimit_base__SHIFT 0x2 #define PKG_PWR_STATUS__RESERVED_MASK 0xfc0000 #define PKG_PWR_STATUS__RESERVED__SHIFT 0x12 #define PKG_PWR_STATUS__PkgPwr_MAWt_MASK 0xff000000 #define PKG_PWR_STATUS__PkgPwr_MAWt__SHIFT 0x18 #define DISP_PHY_CONFIG__Corner_MASK 0xff #define DISP_PHY_CONFIG__Corner__SHIFT 0x0 #define DISP_PHY_CONFIG__DispPHYConfig_MASK 0xff00 #define DISP_PHY_CONFIG__DispPHYConfig__SHIFT 0x8 #define GPU_TDP_LIMIT__Gpu_Tdp_Limit_MASK 0xffff #define GPU_TDP_LIMIT__Gpu_Tdp_Limit__SHIFT 0x0 #define GPU_TDP_LIMIT__Reserved_MASK 0xffff0000 #define GPU_TDP_LIMIT__Reserved__SHIFT 0x10 #define EXT_API_IN_DATA_0_0__byte0_MASK 0xff #define EXT_API_IN_DATA_0_0__byte0__SHIFT 0x0 #define EXT_API_IN_DATA_0_0__byte1_MASK 0xff00 #define EXT_API_IN_DATA_0_0__byte1__SHIFT 0x8 #define EXT_API_IN_DATA_0_0__byte2_MASK 0xff0000 #define EXT_API_IN_DATA_0_0__byte2__SHIFT 0x10 #define EXT_API_IN_DATA_0_0__byte3_MASK 0xff000000 #define EXT_API_IN_DATA_0_0__byte3__SHIFT 0x18 #define EXT_API_IN_DATA_0_1__byte0_MASK 0xff #define EXT_API_IN_DATA_0_1__byte0__SHIFT 0x0 #define EXT_API_IN_DATA_0_1__byte1_MASK 0xff00 #define EXT_API_IN_DATA_0_1__byte1__SHIFT 0x8 #define EXT_API_IN_DATA_0_1__byte2_MASK 0xff0000 #define EXT_API_IN_DATA_0_1__byte2__SHIFT 0x10 #define EXT_API_IN_DATA_0_1__byte3_MASK 0xff000000 #define EXT_API_IN_DATA_0_1__byte3__SHIFT 0x18 #define EXT_API_IN_DATA_0_2__byte0_MASK 0xff #define EXT_API_IN_DATA_0_2__byte0__SHIFT 0x0 #define EXT_API_IN_DATA_0_2__byte1_MASK 0xff00 #define EXT_API_IN_DATA_0_2__byte1__SHIFT 0x8 #define EXT_API_IN_DATA_0_2__byte2_MASK 0xff0000 #define EXT_API_IN_DATA_0_2__byte2__SHIFT 0x10 #define EXT_API_IN_DATA_0_2__byte3_MASK 0xff000000 #define EXT_API_IN_DATA_0_2__byte3__SHIFT 0x18 #define EXT_API_IN_DATA_0_3__byte0_MASK 0xff #define 
EXT_API_IN_DATA_0_3__byte0__SHIFT 0x0 #define EXT_API_IN_DATA_0_3__byte1_MASK 0xff00 #define EXT_API_IN_DATA_0_3__byte1__SHIFT 0x8 #define EXT_API_IN_DATA_0_3__byte2_MASK 0xff0000 #define EXT_API_IN_DATA_0_3__byte2__SHIFT 0x10 #define EXT_API_IN_DATA_0_3__byte3_MASK 0xff000000 #define EXT_API_IN_DATA_0_3__byte3__SHIFT 0x18 #define EXT_API_OUT_DATA_0_0__byte0_MASK 0xff #define EXT_API_OUT_DATA_0_0__byte0__SHIFT 0x0 #define EXT_API_OUT_DATA_0_0__byte1_MASK 0xff00 #define EXT_API_OUT_DATA_0_0__byte1__SHIFT 0x8 #define EXT_API_OUT_DATA_0_0__byte2_MASK 0xff0000 #define EXT_API_OUT_DATA_0_0__byte2__SHIFT 0x10 #define EXT_API_OUT_DATA_0_0__byte3_MASK 0xff000000 #define EXT_API_OUT_DATA_0_0__byte3__SHIFT 0x18 #define EXT_API_OUT_DATA_0_1__byte0_MASK 0xff #define EXT_API_OUT_DATA_0_1__byte0__SHIFT 0x0 #define EXT_API_OUT_DATA_0_1__byte1_MASK 0xff00 #define EXT_API_OUT_DATA_0_1__byte1__SHIFT 0x8 #define EXT_API_OUT_DATA_0_1__byte2_MASK 0xff0000 #define EXT_API_OUT_DATA_0_1__byte2__SHIFT 0x10 #define EXT_API_OUT_DATA_0_1__byte3_MASK 0xff000000 #define EXT_API_OUT_DATA_0_1__byte3__SHIFT 0x18 #define EXT_API_OUT_DATA_0_2__byte0_MASK 0xff #define EXT_API_OUT_DATA_0_2__byte0__SHIFT 0x0 #define EXT_API_OUT_DATA_0_2__byte1_MASK 0xff00 #define EXT_API_OUT_DATA_0_2__byte1__SHIFT 0x8 #define EXT_API_OUT_DATA_0_2__byte2_MASK 0xff0000 #define EXT_API_OUT_DATA_0_2__byte2__SHIFT 0x10 #define EXT_API_OUT_DATA_0_2__byte3_MASK 0xff000000 #define EXT_API_OUT_DATA_0_2__byte3__SHIFT 0x18 #define EXT_API_OUT_DATA_0_3__byte0_MASK 0xff #define EXT_API_OUT_DATA_0_3__byte0__SHIFT 0x0 #define EXT_API_OUT_DATA_0_3__byte1_MASK 0xff00 #define EXT_API_OUT_DATA_0_3__byte1__SHIFT 0x8 #define EXT_API_OUT_DATA_0_3__byte2_MASK 0xff0000 #define EXT_API_OUT_DATA_0_3__byte2__SHIFT 0x10 #define EXT_API_OUT_DATA_0_3__byte3_MASK 0xff000000 #define EXT_API_OUT_DATA_0_3__byte3__SHIFT 0x18 #define BAPM_PARAMETERS__MaxPwrCpu_1_MASK 0xff #define BAPM_PARAMETERS__MaxPwrCpu_1__SHIFT 0x0 #define BAPM_PARAMETERS__NomPwrCpu_1_MASK 0xff00 #define BAPM_PARAMETERS__NomPwrCpu_1__SHIFT 0x8 #define BAPM_PARAMETERS__MaxPwrCpu_0_MASK 0xff0000 #define BAPM_PARAMETERS__MaxPwrCpu_0__SHIFT 0x10 #define BAPM_PARAMETERS__NomPwrCpu_0_MASK 0xff000000 #define BAPM_PARAMETERS__NomPwrCpu_0__SHIFT 0x18 #define BAPM_PARAMETERS_2__MaxPwrGpu_MASK 0xffff #define BAPM_PARAMETERS_2__MaxPwrGpu__SHIFT 0x0 #define BAPM_PARAMETERS_2__NomPwrGpu_MASK 0xffff0000 #define BAPM_PARAMETERS_2__NomPwrGpu__SHIFT 0x10 #define BAPM_PARAMETERS_3__TjOffset_MASK 0xff #define BAPM_PARAMETERS_3__TjOffset__SHIFT 0x0 #define BAPM_PARAMETERS_3__EnergyCntNorm_MASK 0x3ff00 #define BAPM_PARAMETERS_3__EnergyCntNorm__SHIFT 0x8 #define BAPM_PARAMETERS_3__Reserved_MASK 0xfffc0000 #define BAPM_PARAMETERS_3__Reserved__SHIFT 0x12 #define BAPM_PARAMETERS_4__MinPwrGpu_MASK 0xffff #define BAPM_PARAMETERS_4__MinPwrGpu__SHIFT 0x0 #define BAPM_PARAMETERS_4__MidPwrCpu_1_MASK 0xff0000 #define BAPM_PARAMETERS_4__MidPwrCpu_1__SHIFT 0x10 #define BAPM_PARAMETERS_4__MidPwrCpu_0_MASK 0xff000000 #define BAPM_PARAMETERS_4__MidPwrCpu_0__SHIFT 0x18 #define SMU_SVI_TELEMETRY__Iddspike_OCP_MASK 0xffff #define SMU_SVI_TELEMETRY__Iddspike_OCP__SHIFT 0x0 #define SMU_SVI_TELEMETRY__IddNbspike_OCP_MASK 0xffff0000 #define SMU_SVI_TELEMETRY__IddNbspike_OCP__SHIFT 0x10 #define BAPM_STATUS__THROTTLE_MASK 0xff #define BAPM_STATUS__THROTTLE__SHIFT 0x0 #define BAPM_STATUS__THROTTLE_LAST_MASK 0xff00 #define BAPM_STATUS__THROTTLE_LAST__SHIFT 0x8 #define BAPM_STATUS__COUNT_CORE1_MASK 0xff0000 #define BAPM_STATUS__COUNT_CORE1__SHIFT 0x10 
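/*
 * Second illustrative sketch (again not part of the original header): the
 * FEATURE_STATUS fields defined earlier are single-bit flags, so testing a
 * feature only needs the _MASK value, while updating a multi-bit field is a
 * clear-then-insert using the _MASK/__SHIFT pair.  The helper names below
 * (smu_feature_enabled, smu_set_field, sclk_and_bapm_active) are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool smu_feature_enabled(uint32_t feature_status, uint32_t mask)
{
	return (feature_status & mask) != 0;
}

/* Hypothetical field update: clear the field, then insert the new value. */
static inline uint32_t smu_set_field(uint32_t reg, uint32_t mask,
				     uint32_t shift, uint32_t value)
{
	return (reg & ~mask) | ((value << shift) & mask);
}

static bool sclk_and_bapm_active(uint32_t feature_status)
{
	/* Bit positions come from the FEATURE_STATUS defines above. */
	return smu_feature_enabled(feature_status, FEATURE_STATUS__SCLK_DPM_ON_MASK) &&
	       smu_feature_enabled(feature_status, FEATURE_STATUS__BAPM_ON_MASK);
}
/* End of illustrative sketch; the original register definitions continue. */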
#define BAPM_STATUS__COUNT_CORE0_MASK 0xff000000 #define BAPM_STATUS__COUNT_CORE0__SHIFT 0x18 #define SMU_HTC_STATUS__HTC_ACTIVE_MASK 0x1 #define SMU_HTC_STATUS__HTC_ACTIVE__SHIFT 0x0 #define SMU_HTC_STATUS__Reserved_MASK 0xfffffffe #define SMU_HTC_STATUS__Reserved__SHIFT 0x1 #define SMU_VPC_STATUS__AllCpuIdleLast_MASK 0x1 #define SMU_VPC_STATUS__AllCpuIdleLast__SHIFT 0x0 #define SMU_VPC_STATUS__Reserved_MASK 0xfffffffe #define SMU_VPC_STATUS__Reserved__SHIFT 0x1 #define ENTITY_TEMPERATURES_1__CORE0_MASK 0xffffffff #define ENTITY_TEMPERATURES_1__CORE0__SHIFT 0x0 #define ENTITY_TEMPERATURES_2__CORE1_MASK 0xffffffff #define ENTITY_TEMPERATURES_2__CORE1__SHIFT 0x0 #define ENTITY_TEMPERATURES_3__GPU_MASK 0xffffffff #define ENTITY_TEMPERATURES_3__GPU__SHIFT 0x0 #define CU_POWER__CU0_POWER_MASK 0xffff #define CU_POWER__CU0_POWER__SHIFT 0x0 #define CU_POWER__CU1_POWER_MASK 0xffff0000 #define CU_POWER__CU1_POWER__SHIFT 0x10 #define GPU_POWER__IGPU_POWER_MASK 0xffff #define GPU_POWER__IGPU_POWER__SHIFT 0x0 #define GPU_POWER__DGPU_POWER_MASK 0xffff0000 #define GPU_POWER__DGPU_POWER__SHIFT 0x10 #define NTE_POWER__NTE0_POWER_MASK 0xffff #define NTE_POWER__NTE0_POWER__SHIFT 0x0 #define NTE_POWER__NTE1_POWER_MASK 0xffff0000 #define NTE_POWER__NTE1_POWER__SHIFT 0x10 #define TDC_STATUS__VDD_Boost_MASK 0xff #define TDC_STATUS__VDD_Boost__SHIFT 0x0 #define TDC_STATUS__VDD_Throttle_MASK 0xff00 #define TDC_STATUS__VDD_Throttle__SHIFT 0x8 #define TDC_STATUS__VDDNB_Boost_MASK 0xff0000 #define TDC_STATUS__VDDNB_Boost__SHIFT 0x10 #define TDC_STATUS__VDDNB_Throttle_MASK 0xff000000 #define TDC_STATUS__VDDNB_Throttle__SHIFT 0x18 #define TDC_MV_AVERAGE__IDD_MASK 0xffff #define TDC_MV_AVERAGE__IDD__SHIFT 0x0 #define TDC_MV_AVERAGE__IDDNB_MASK 0xffff0000 #define TDC_MV_AVERAGE__IDDNB__SHIFT 0x10 #define PM_CONFIG__Enable_VPC_Accumulators_MASK 0x1 #define PM_CONFIG__Enable_VPC_Accumulators__SHIFT 0x0 #define PM_CONFIG__Enable_BAPM_MASK 0x2 #define PM_CONFIG__Enable_BAPM__SHIFT 0x1 #define PM_CONFIG__Enable_TDC_Limit_MASK 0x4 #define PM_CONFIG__Enable_TDC_Limit__SHIFT 0x2 #define PM_CONFIG__Enable_LPMx_MASK 0x8 #define PM_CONFIG__Enable_LPMx__SHIFT 0x3 #define PM_CONFIG__Enable_HTC_Limit_MASK 0x10 #define PM_CONFIG__Enable_HTC_Limit__SHIFT 0x4 #define PM_CONFIG__Enable_NBDPM_MASK 0x20 #define PM_CONFIG__Enable_NBDPM__SHIFT 0x5 #define PM_CONFIG__Enable_LoadLine_MASK 0x40 #define PM_CONFIG__Enable_LoadLine__SHIFT 0x6 #define PM_CONFIG__Reserved_MASK 0xff80 #define PM_CONFIG__Reserved__SHIFT 0x7 #define PM_CONFIG__Override_VPC_Current_MASK 0x10000 #define PM_CONFIG__Override_VPC_Current__SHIFT 0x10 #define PM_CONFIG__Reserved1_MASK 0x60000 #define PM_CONFIG__Reserved1__SHIFT 0x11 #define PM_CONFIG__Override_Calc_Temp_MASK 0x80000 #define PM_CONFIG__Override_Calc_Temp__SHIFT 0x13 #define PM_CONFIG__Enable_Hybrid_Boost_MASK 0x100000 #define PM_CONFIG__Enable_Hybrid_Boost__SHIFT 0x14 #define PM_CONFIG__Reserved2_MASK 0xe00000 #define PM_CONFIG__Reserved2__SHIFT 0x15 #define PM_CONFIG__PSTATE_AllCpusIdle_MASK 0x7000000 #define PM_CONFIG__PSTATE_AllCpusIdle__SHIFT 0x18 #define PM_CONFIG__NBPSTATE_AllCpusIdle_MASK 0x8000000 #define PM_CONFIG__NBPSTATE_AllCpusIdle__SHIFT 0x1b #define PM_CONFIG__Reserved3_MASK 0x10000000 #define PM_CONFIG__Reserved3__SHIFT 0x1c #define PM_CONFIG__SVI_Mode_MASK 0x20000000 #define PM_CONFIG__SVI_Mode__SHIFT 0x1d #define PM_CONFIG__Enable_PDM_MASK 0x40000000 #define PM_CONFIG__Enable_PDM__SHIFT 0x1e #define PM_CONFIG__Enable_PKG_PWR_LIMIT_MASK 0x80000000 #define PM_CONFIG__Enable_PKG_PWR_LIMIT__SHIFT 
0x1f #define TE0_TEMPERATURE_READ_ADDR__CSR_ADDR_MASK 0x3f #define TE0_TEMPERATURE_READ_ADDR__CSR_ADDR__SHIFT 0x0 #define TE0_TEMPERATURE_READ_ADDR__TCEN_ID_MASK 0x3c0 #define TE0_TEMPERATURE_READ_ADDR__TCEN_ID__SHIFT 0x6 #define TE0_TEMPERATURE_READ_ADDR__RESERVED_MASK 0xfffffc00 #define TE0_TEMPERATURE_READ_ADDR__RESERVED__SHIFT 0xa #define TE1_TEMPERATURE_READ_ADDR__CSR_ADDR_MASK 0x3f #define TE1_TEMPERATURE_READ_ADDR__CSR_ADDR__SHIFT 0x0 #define TE1_TEMPERATURE_READ_ADDR__TCEN_ID_MASK 0x3c0 #define TE1_TEMPERATURE_READ_ADDR__TCEN_ID__SHIFT 0x6 #define TE1_TEMPERATURE_READ_ADDR__RESERVED_MASK 0xfffffc00 #define TE1_TEMPERATURE_READ_ADDR__RESERVED__SHIFT 0xa #define TE2_TEMPERATURE_READ_ADDR__CSR_ADDR_MASK 0x3f #define TE2_TEMPERATURE_READ_ADDR__CSR_ADDR__SHIFT 0x0 #define TE2_TEMPERATURE_READ_ADDR__TCEN_ID_MASK 0x3c0 #define TE2_TEMPERATURE_READ_ADDR__TCEN_ID__SHIFT 0x6 #define TE2_TEMPERATURE_READ_ADDR__RESERVED_MASK 0xfffffc00 #define TE2_TEMPERATURE_READ_ADDR__RESERVED__SHIFT 0xa #define NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK 0xff #define NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT 0x0 #define NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK 0xff00 #define NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT 0x8 #define NB_DPM_CONFIG_1__DpmXNbPsLo_MASK 0xff0000 #define NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT 0x10 #define NB_DPM_CONFIG_1__DpmXNbPsHi_MASK 0xff000000 #define NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT 0x18 #define NB_DPM_CONFIG_2__Hysteresis_MASK 0xff #define NB_DPM_CONFIG_2__Hysteresis__SHIFT 0x0 #define NB_DPM_CONFIG_2__SkipPG_MASK 0xff00 #define NB_DPM_CONFIG_2__SkipPG__SHIFT 0x8 #define NB_DPM_CONFIG_2__SkipDPM0_MASK 0xff0000 #define NB_DPM_CONFIG_2__SkipDPM0__SHIFT 0x10 #define NB_DPM_CONFIG_2__EnablePSI1_MASK 0xff000000 #define NB_DPM_CONFIG_2__EnablePSI1__SHIFT 0x18 #define NB_DPM_CONFIG_3__RESERVED_MASK 0xffffff #define NB_DPM_CONFIG_3__RESERVED__SHIFT 0x0 #define NB_DPM_CONFIG_3__EnableDpmPstatePoll_MASK 0xff000000 #define NB_DPM_CONFIG_3__EnableDpmPstatePoll__SHIFT 0x18 #define SMU_IDD_OVERRIDE__IDD_MASK 0xffff #define SMU_IDD_OVERRIDE__IDD__SHIFT 0x0 #define SMU_IDD_OVERRIDE__IDDNB_MASK 0xffff0000 #define SMU_IDD_OVERRIDE__IDDNB__SHIFT 0x10 #define AVS_CONFIG__AvsEnabledForPstates_MASK 0xff #define AVS_CONFIG__AvsEnabledForPstates__SHIFT 0x0 #define AVS_CONFIG__AvsOverrideEnabled_MASK 0x100 #define AVS_CONFIG__AvsOverrideEnabled__SHIFT 0x8 #define AVS_CONFIG__AvsPsmTempCompensation_MASK 0x200 #define AVS_CONFIG__AvsPsmTempCompensation__SHIFT 0x9 #define AVS_CONFIG__RESERVED1_MASK 0xfc00 #define AVS_CONFIG__RESERVED1__SHIFT 0xa #define AVS_CONFIG__AvsOverrideOffset_MASK 0xff0000 #define AVS_CONFIG__AvsOverrideOffset__SHIFT 0x10 #define AVS_CONFIG__RESERVED_MASK 0xff000000 #define AVS_CONFIG__RESERVED__SHIFT 0x18 #define TDC_VRM_LIMIT__IDD_MASK 0xffff #define TDC_VRM_LIMIT__IDD__SHIFT 0x0 #define TDC_VRM_LIMIT__IDDNB_MASK 0xffff0000 #define TDC_VRM_LIMIT__IDDNB__SHIFT 0x10 #define CU0_PSM_CONFIG__Psm4_MASK 0xff #define CU0_PSM_CONFIG__Psm4__SHIFT 0x0 #define CU0_PSM_CONFIG__Psm3_MASK 0xff00 #define CU0_PSM_CONFIG__Psm3__SHIFT 0x8 #define CU0_PSM_CONFIG__Psm2_MASK 0xff0000 #define CU0_PSM_CONFIG__Psm2__SHIFT 0x10 #define CU0_PSM_CONFIG__Psm1_MASK 0xff000000 #define CU0_PSM_CONFIG__Psm1__SHIFT 0x18 #define CU1_PSM_CONFIG__Psm4_MASK 0xff #define CU1_PSM_CONFIG__Psm4__SHIFT 0x0 #define CU1_PSM_CONFIG__Psm3_MASK 0xff00 #define CU1_PSM_CONFIG__Psm3__SHIFT 0x8 #define CU1_PSM_CONFIG__Psm2_MASK 0xff0000 #define CU1_PSM_CONFIG__Psm2__SHIFT 0x10 #define CU1_PSM_CONFIG__Psm1_MASK 0xff000000 #define CU1_PSM_CONFIG__Psm1__SHIFT 
0x18 #define SPMI_CONFIG__SpmiTestCode_MASK 0xff #define SPMI_CONFIG__SpmiTestCode__SHIFT 0x0 #define SPMI_CONFIG__SpmiTestData_MASK 0xff00 #define SPMI_CONFIG__SpmiTestData__SHIFT 0x8 #define SPMI_CONFIG__RESERVED_MASK 0xffff0000 #define SPMI_CONFIG__RESERVED__SHIFT 0x10 #define SPMI_SMC_CHAIN_ADDR__Addr_MASK 0xffffffff #define SPMI_SMC_CHAIN_ADDR__Addr__SHIFT 0x0 #define SPMI_STATUS__OpDone_MASK 0xff #define SPMI_STATUS__OpDone__SHIFT 0x0 #define SPMI_STATUS__OpFailed_MASK 0xff00 #define SPMI_STATUS__OpFailed__SHIFT 0x8 #define AVSNB_CONFIG__AvsEnabledForPstates_MASK 0xf #define AVSNB_CONFIG__AvsEnabledForPstates__SHIFT 0x0 #define AVSNB_CONFIG__RESERVED0_MASK 0xf0 #define AVSNB_CONFIG__RESERVED0__SHIFT 0x4 #define AVSNB_CONFIG__AvsOverrideEnabled_MASK 0x100 #define AVSNB_CONFIG__AvsOverrideEnabled__SHIFT 0x8 #define AVSNB_CONFIG__AvsPsmTempCompensation_MASK 0x200 #define AVSNB_CONFIG__AvsPsmTempCompensation__SHIFT 0x9 #define AVSNB_CONFIG__RESERVED1_MASK 0xfc00 #define AVSNB_CONFIG__RESERVED1__SHIFT 0xa #define AVSNB_CONFIG__AvsOverrideOffset_MASK 0xff0000 #define AVSNB_CONFIG__AvsOverrideOffset__SHIFT 0x10 #define AVSNB_CONFIG__RESERVED_MASK 0xff000000 #define AVSNB_CONFIG__RESERVED__SHIFT 0x18 #define HTC_CONFIG__CSR_ADDR_MASK 0x3f #define HTC_CONFIG__CSR_ADDR__SHIFT 0x0 #define HTC_CONFIG__TCEN_ID_MASK 0x3c0 #define HTC_CONFIG__TCEN_ID__SHIFT 0x6 #define HTC_CONFIG__HTC_ACTIVE_PSTATE_LIMIT_MASK 0xff0000 #define HTC_CONFIG__HTC_ACTIVE_PSTATE_LIMIT__SHIFT 0x10 #define HTC_CONFIG__Reserved_MASK 0xff000000 #define HTC_CONFIG__Reserved__SHIFT 0x18 #define AVS_CU0_TEMPERATURE_SENSOR__CsrAddr_MASK 0x3f #define AVS_CU0_TEMPERATURE_SENSOR__CsrAddr__SHIFT 0x0 #define AVS_CU0_TEMPERATURE_SENSOR__TcenID_MASK 0x3c0 #define AVS_CU0_TEMPERATURE_SENSOR__TcenID__SHIFT 0x6 #define AVS_CU0_TEMPERATURE_SENSOR__RESERVED_MASK 0xfffffc00 #define AVS_CU0_TEMPERATURE_SENSOR__RESERVED__SHIFT 0xa #define AVS_CU1_TEMPERATURE_SENSOR__CsrAddr_MASK 0x3f #define AVS_CU1_TEMPERATURE_SENSOR__CsrAddr__SHIFT 0x0 #define AVS_CU1_TEMPERATURE_SENSOR__TcenID_MASK 0x3c0 #define AVS_CU1_TEMPERATURE_SENSOR__TcenID__SHIFT 0x6 #define AVS_CU1_TEMPERATURE_SENSOR__RESERVED_MASK 0xfffffc00 #define AVS_CU1_TEMPERATURE_SENSOR__RESERVED__SHIFT 0xa #define AVS_GNB_TEMPERATURE_SENSOR__CsrAddr_MASK 0x3f #define AVS_GNB_TEMPERATURE_SENSOR__CsrAddr__SHIFT 0x0 #define AVS_GNB_TEMPERATURE_SENSOR__TcenID_MASK 0x3c0 #define AVS_GNB_TEMPERATURE_SENSOR__TcenID__SHIFT 0x6 #define AVS_GNB_TEMPERATURE_SENSOR__RESERVED_MASK 0xfffffc00 #define AVS_GNB_TEMPERATURE_SENSOR__RESERVED__SHIFT 0xa #define AVS_UNB_TEMPERATURE_SENSOR__CsrAddr_MASK 0x3f #define AVS_UNB_TEMPERATURE_SENSOR__CsrAddr__SHIFT 0x0 #define AVS_UNB_TEMPERATURE_SENSOR__TcenID_MASK 0x3c0 #define AVS_UNB_TEMPERATURE_SENSOR__TcenID__SHIFT 0x6 #define AVS_UNB_TEMPERATURE_SENSOR__RESERVED_MASK 0xfffffc00 #define AVS_UNB_TEMPERATURE_SENSOR__RESERVED__SHIFT 0xa #define SMU_MONITOR_PORT80_MMIO_ADDR__MMIO_ADDRESS_MASK 0xffffffff #define SMU_MONITOR_PORT80_MMIO_ADDR__MMIO_ADDRESS__SHIFT 0x0 #define SMU_MONITOR_PORT80_MEMBASE_HI__MEMORY_BASE_HI_MASK 0xffffffff #define SMU_MONITOR_PORT80_MEMBASE_HI__MEMORY_BASE_HI__SHIFT 0x0 #define SMU_MONITOR_PORT80_MEMBASE_LO__MEMORY_BASE_LO_MASK 0xffffffff #define SMU_MONITOR_PORT80_MEMBASE_LO__MEMORY_BASE_LO__SHIFT 0x0 #define SMU_MONITOR_PORT80_MEMSETUP__MEMORY_POSITION_MASK 0xffff #define SMU_MONITOR_PORT80_MEMSETUP__MEMORY_POSITION__SHIFT 0x0 #define SMU_MONITOR_PORT80_MEMSETUP__MEMORY_BUFFER_SIZE_MASK 0xffff0000 #define 
SMU_MONITOR_PORT80_MEMSETUP__MEMORY_BUFFER_SIZE__SHIFT 0x10 #define SMU_MONITOR_PORT80_CTRL__ENABLE_DRAM_SHADOW_MASK 0x1 #define SMU_MONITOR_PORT80_CTRL__ENABLE_DRAM_SHADOW__SHIFT 0x0 #define SMU_MONITOR_PORT80_CTRL__ENABLE_CSR_SHADOW_MASK 0x2 #define SMU_MONITOR_PORT80_CTRL__ENABLE_CSR_SHADOW__SHIFT 0x1 #define SMU_MONITOR_PORT80_CTRL__RESERVED_MASK 0xfffc #define SMU_MONITOR_PORT80_CTRL__RESERVED__SHIFT 0x2 #define SMU_MONITOR_PORT80_CTRL__POLLING_INTERVAL_MASK 0xffff0000 #define SMU_MONITOR_PORT80_CTRL__POLLING_INTERVAL__SHIFT 0x10 #define SMU_TCEN_ALIVE__CORE_TCEN_ID_MASK 0xff #define SMU_TCEN_ALIVE__CORE_TCEN_ID__SHIFT 0x0 #define SMU_TCEN_ALIVE__GNB_TCEN_ID_MASK 0xff00 #define SMU_TCEN_ALIVE__GNB_TCEN_ID__SHIFT 0x8 #define SMU_TCEN_ALIVE__RESERVED_MASK 0xffff0000 #define SMU_TCEN_ALIVE__RESERVED__SHIFT 0x10 #define PDM_STATUS__PDM_ENABLED_MASK 0x1 #define PDM_STATUS__PDM_ENABLED__SHIFT 0x0 #define PDM_STATUS__NewCpcTdpLimit_MASK 0x1fffe #define PDM_STATUS__NewCpcTdpLimit__SHIFT 0x1 #define PDM_STATUS__NoofConnectedCores_MASK 0x1e0000 #define PDM_STATUS__NoofConnectedCores__SHIFT 0x11 #define PDM_STATUS__Reserved_MASK 0xffe00000 #define PDM_STATUS__Reserved__SHIFT 0x15 #define PDM_CNTL_1__BaseCoreTdpLimit0_MASK 0xff #define PDM_CNTL_1__BaseCoreTdpLimit0__SHIFT 0x0 #define PDM_CNTL_1__BaseCoreTdpLimit1_MASK 0xff00 #define PDM_CNTL_1__BaseCoreTdpLimit1__SHIFT 0x8 #define PDM_CNTL_1__BaseCoreTdpLimit2_MASK 0xff0000 #define PDM_CNTL_1__BaseCoreTdpLimit2__SHIFT 0x10 #define PDM_CNTL_1__GpuPdmMult_MASK 0xff000000 #define PDM_CNTL_1__GpuPdmMult__SHIFT 0x18 #define PDM_CNTL_2__HeatPdmTc_MASK 0xff #define PDM_CNTL_2__HeatPdmTc__SHIFT 0x0 #define PDM_CNTL_2__CoolPdmTc_MASK 0xff00 #define PDM_CNTL_2__CoolPdmTc__SHIFT 0x8 #define PDM_CNTL_2__GpuPdmTc_MASK 0xff0000 #define PDM_CNTL_2__GpuPdmTc__SHIFT 0x10 #define PDM_CNTL_2__GpuActThr_MASK 0xff000000 #define PDM_CNTL_2__GpuActThr__SHIFT 0x18 #define PDM_CNTL_3__HeatPdmThr1_MASK 0xff #define PDM_CNTL_3__HeatPdmThr1__SHIFT 0x0 #define PDM_CNTL_3__HeatPdmThr2_MASK 0xff00 #define PDM_CNTL_3__HeatPdmThr2__SHIFT 0x8 #define PDM_CNTL_3__CoolPdmThr1_MASK 0xff0000 #define PDM_CNTL_3__CoolPdmThr1__SHIFT 0x10 #define PDM_CNTL_3__CoolPdmThr2_MASK 0xff000000 #define PDM_CNTL_3__CoolPdmThr2__SHIFT 0x18 #define SMU_PM_STATUS_0__DATA_MASK 0xffffffff #define SMU_PM_STATUS_0__DATA__SHIFT 0x0 #define SMU_PM_STATUS_1__DATA_MASK 0xffffffff #define SMU_PM_STATUS_1__DATA__SHIFT 0x0 #define SMU_PM_STATUS_2__DATA_MASK 0xffffffff #define SMU_PM_STATUS_2__DATA__SHIFT 0x0 #define SMU_PM_STATUS_3__DATA_MASK 0xffffffff #define SMU_PM_STATUS_3__DATA__SHIFT 0x0 #define SMU_PM_STATUS_4__DATA_MASK 0xffffffff #define SMU_PM_STATUS_4__DATA__SHIFT 0x0 #define SMU_PM_STATUS_5__DATA_MASK 0xffffffff #define SMU_PM_STATUS_5__DATA__SHIFT 0x0 #define SMU_PM_STATUS_6__DATA_MASK 0xffffffff #define SMU_PM_STATUS_6__DATA__SHIFT 0x0 #define SMU_PM_STATUS_7__DATA_MASK 0xffffffff #define SMU_PM_STATUS_7__DATA__SHIFT 0x0 #define SMU_PM_STATUS_8__DATA_MASK 0xffffffff #define SMU_PM_STATUS_8__DATA__SHIFT 0x0 #define SMU_PM_STATUS_9__DATA_MASK 0xffffffff #define SMU_PM_STATUS_9__DATA__SHIFT 0x0 #define SMU_PM_STATUS_10__DATA_MASK 0xffffffff #define SMU_PM_STATUS_10__DATA__SHIFT 0x0 #define SMU_PM_STATUS_11__DATA_MASK 0xffffffff #define SMU_PM_STATUS_11__DATA__SHIFT 0x0 #define SMU_PM_STATUS_12__DATA_MASK 0xffffffff #define SMU_PM_STATUS_12__DATA__SHIFT 0x0 #define SMU_PM_STATUS_13__DATA_MASK 0xffffffff #define SMU_PM_STATUS_13__DATA__SHIFT 0x0 #define SMU_PM_STATUS_14__DATA_MASK 0xffffffff #define 
SMU_PM_STATUS_14__DATA__SHIFT 0x0 #define SMU_PM_STATUS_15__DATA_MASK 0xffffffff #define SMU_PM_STATUS_15__DATA__SHIFT 0x0 #define SMU_PM_STATUS_16__DATA_MASK 0xffffffff #define SMU_PM_STATUS_16__DATA__SHIFT 0x0 #define SMU_PM_STATUS_17__DATA_MASK 0xffffffff #define SMU_PM_STATUS_17__DATA__SHIFT 0x0 #define SMU_PM_STATUS_18__DATA_MASK 0xffffffff #define SMU_PM_STATUS_18__DATA__SHIFT 0x0 #define SMU_PM_STATUS_19__DATA_MASK 0xffffffff #define SMU_PM_STATUS_19__DATA__SHIFT 0x0 #define SMU_PM_STATUS_20__DATA_MASK 0xffffffff #define SMU_PM_STATUS_20__DATA__SHIFT 0x0 #define SMU_PM_STATUS_21__DATA_MASK 0xffffffff #define SMU_PM_STATUS_21__DATA__SHIFT 0x0 #define SMU_PM_STATUS_22__DATA_MASK 0xffffffff #define SMU_PM_STATUS_22__DATA__SHIFT 0x0 #define SMU_PM_STATUS_23__DATA_MASK 0xffffffff #define SMU_PM_STATUS_23__DATA__SHIFT 0x0 #define SMU_PM_STATUS_24__DATA_MASK 0xffffffff #define SMU_PM_STATUS_24__DATA__SHIFT 0x0 #define SMU_PM_STATUS_25__DATA_MASK 0xffffffff #define SMU_PM_STATUS_25__DATA__SHIFT 0x0 #define SMU_PM_STATUS_26__DATA_MASK 0xffffffff #define SMU_PM_STATUS_26__DATA__SHIFT 0x0 #define SMU_PM_STATUS_27__DATA_MASK 0xffffffff #define SMU_PM_STATUS_27__DATA__SHIFT 0x0 #define SMU_PM_STATUS_28__DATA_MASK 0xffffffff #define SMU_PM_STATUS_28__DATA__SHIFT 0x0 #define SMU_PM_STATUS_29__DATA_MASK 0xffffffff #define SMU_PM_STATUS_29__DATA__SHIFT 0x0 #define SMU_PM_STATUS_30__DATA_MASK 0xffffffff #define SMU_PM_STATUS_30__DATA__SHIFT 0x0 #define SMU_PM_STATUS_31__DATA_MASK 0xffffffff #define SMU_PM_STATUS_31__DATA__SHIFT 0x0 #define SMU_PM_STATUS_32__DATA_MASK 0xffffffff #define SMU_PM_STATUS_32__DATA__SHIFT 0x0 #define SMU_PM_STATUS_33__DATA_MASK 0xffffffff #define SMU_PM_STATUS_33__DATA__SHIFT 0x0 #define SMU_PM_STATUS_34__DATA_MASK 0xffffffff #define SMU_PM_STATUS_34__DATA__SHIFT 0x0 #define SMU_PM_STATUS_35__DATA_MASK 0xffffffff #define SMU_PM_STATUS_35__DATA__SHIFT 0x0 #define SMU_PM_STATUS_36__DATA_MASK 0xffffffff #define SMU_PM_STATUS_36__DATA__SHIFT 0x0 #define SMU_PM_STATUS_37__DATA_MASK 0xffffffff #define SMU_PM_STATUS_37__DATA__SHIFT 0x0 #define SMU_PM_STATUS_38__DATA_MASK 0xffffffff #define SMU_PM_STATUS_38__DATA__SHIFT 0x0 #define SMU_PM_STATUS_39__DATA_MASK 0xffffffff #define SMU_PM_STATUS_39__DATA__SHIFT 0x0 #define SMU_PM_STATUS_40__DATA_MASK 0xffffffff #define SMU_PM_STATUS_40__DATA__SHIFT 0x0 #define SMU_PM_STATUS_41__DATA_MASK 0xffffffff #define SMU_PM_STATUS_41__DATA__SHIFT 0x0 #define SMU_PM_STATUS_42__DATA_MASK 0xffffffff #define SMU_PM_STATUS_42__DATA__SHIFT 0x0 #define SMU_PM_STATUS_43__DATA_MASK 0xffffffff #define SMU_PM_STATUS_43__DATA__SHIFT 0x0 #define SMU_PM_STATUS_44__DATA_MASK 0xffffffff #define SMU_PM_STATUS_44__DATA__SHIFT 0x0 #define SMU_PM_STATUS_45__DATA_MASK 0xffffffff #define SMU_PM_STATUS_45__DATA__SHIFT 0x0 #define SMU_PM_STATUS_46__DATA_MASK 0xffffffff #define SMU_PM_STATUS_46__DATA__SHIFT 0x0 #define SMU_PM_STATUS_47__DATA_MASK 0xffffffff #define SMU_PM_STATUS_47__DATA__SHIFT 0x0 #define SMU_PM_STATUS_48__DATA_MASK 0xffffffff #define SMU_PM_STATUS_48__DATA__SHIFT 0x0 #define SMU_PM_STATUS_49__DATA_MASK 0xffffffff #define SMU_PM_STATUS_49__DATA__SHIFT 0x0 #define SMU_PM_STATUS_50__DATA_MASK 0xffffffff #define SMU_PM_STATUS_50__DATA__SHIFT 0x0 #define SMU_PM_STATUS_51__DATA_MASK 0xffffffff #define SMU_PM_STATUS_51__DATA__SHIFT 0x0 #define SMU_PM_STATUS_52__DATA_MASK 0xffffffff #define SMU_PM_STATUS_52__DATA__SHIFT 0x0 #define SMU_PM_STATUS_53__DATA_MASK 0xffffffff #define SMU_PM_STATUS_53__DATA__SHIFT 0x0 #define SMU_PM_STATUS_54__DATA_MASK 0xffffffff 
#define SMU_PM_STATUS_54__DATA__SHIFT 0x0 #define SMU_PM_STATUS_55__DATA_MASK 0xffffffff #define SMU_PM_STATUS_55__DATA__SHIFT 0x0 #define SMU_PM_STATUS_56__DATA_MASK 0xffffffff #define SMU_PM_STATUS_56__DATA__SHIFT 0x0 #define SMU_PM_STATUS_57__DATA_MASK 0xffffffff #define SMU_PM_STATUS_57__DATA__SHIFT 0x0 #define SMU_PM_STATUS_58__DATA_MASK 0xffffffff #define SMU_PM_STATUS_58__DATA__SHIFT 0x0 #define SMU_PM_STATUS_59__DATA_MASK 0xffffffff #define SMU_PM_STATUS_59__DATA__SHIFT 0x0 #define SMU_PM_STATUS_60__DATA_MASK 0xffffffff #define SMU_PM_STATUS_60__DATA__SHIFT 0x0 #define SMU_PM_STATUS_61__DATA_MASK 0xffffffff #define SMU_PM_STATUS_61__DATA__SHIFT 0x0 #define SMU_PM_STATUS_62__DATA_MASK 0xffffffff #define SMU_PM_STATUS_62__DATA__SHIFT 0x0 #define SMU_PM_STATUS_63__DATA_MASK 0xffffffff #define SMU_PM_STATUS_63__DATA__SHIFT 0x0 #define SMU_PM_STATUS_64__DATA_MASK 0xffffffff #define SMU_PM_STATUS_64__DATA__SHIFT 0x0 #define SMU_PM_STATUS_65__DATA_MASK 0xffffffff #define SMU_PM_STATUS_65__DATA__SHIFT 0x0 #define SMU_PM_STATUS_66__DATA_MASK 0xffffffff #define SMU_PM_STATUS_66__DATA__SHIFT 0x0 #define SMU_PM_STATUS_67__DATA_MASK 0xffffffff #define SMU_PM_STATUS_67__DATA__SHIFT 0x0 #define SMU_PM_STATUS_68__DATA_MASK 0xffffffff #define SMU_PM_STATUS_68__DATA__SHIFT 0x0 #define SMU_PM_STATUS_69__DATA_MASK 0xffffffff #define SMU_PM_STATUS_69__DATA__SHIFT 0x0 #define SMU_PM_STATUS_70__DATA_MASK 0xffffffff #define SMU_PM_STATUS_70__DATA__SHIFT 0x0 #define SMU_PM_STATUS_71__DATA_MASK 0xffffffff #define SMU_PM_STATUS_71__DATA__SHIFT 0x0 #define SMU_PM_STATUS_72__DATA_MASK 0xffffffff #define SMU_PM_STATUS_72__DATA__SHIFT 0x0 #define SMU_PM_STATUS_73__DATA_MASK 0xffffffff #define SMU_PM_STATUS_73__DATA__SHIFT 0x0 #define SMU_PM_STATUS_74__DATA_MASK 0xffffffff #define SMU_PM_STATUS_74__DATA__SHIFT 0x0 #define SMU_PM_STATUS_75__DATA_MASK 0xffffffff #define SMU_PM_STATUS_75__DATA__SHIFT 0x0 #define SMU_PM_STATUS_76__DATA_MASK 0xffffffff #define SMU_PM_STATUS_76__DATA__SHIFT 0x0 #define SMU_PM_STATUS_77__DATA_MASK 0xffffffff #define SMU_PM_STATUS_77__DATA__SHIFT 0x0 #define SMU_PM_STATUS_78__DATA_MASK 0xffffffff #define SMU_PM_STATUS_78__DATA__SHIFT 0x0 #define SMU_PM_STATUS_79__DATA_MASK 0xffffffff #define SMU_PM_STATUS_79__DATA__SHIFT 0x0 #define SMU_PM_STATUS_80__DATA_MASK 0xffffffff #define SMU_PM_STATUS_80__DATA__SHIFT 0x0 #define SMU_PM_STATUS_81__DATA_MASK 0xffffffff #define SMU_PM_STATUS_81__DATA__SHIFT 0x0 #define SMU_PM_STATUS_82__DATA_MASK 0xffffffff #define SMU_PM_STATUS_82__DATA__SHIFT 0x0 #define SMU_PM_STATUS_83__DATA_MASK 0xffffffff #define SMU_PM_STATUS_83__DATA__SHIFT 0x0 #define SMU_PM_STATUS_84__DATA_MASK 0xffffffff #define SMU_PM_STATUS_84__DATA__SHIFT 0x0 #define SMU_PM_STATUS_85__DATA_MASK 0xffffffff #define SMU_PM_STATUS_85__DATA__SHIFT 0x0 #define SMU_PM_STATUS_86__DATA_MASK 0xffffffff #define SMU_PM_STATUS_86__DATA__SHIFT 0x0 #define SMU_PM_STATUS_87__DATA_MASK 0xffffffff #define SMU_PM_STATUS_87__DATA__SHIFT 0x0 #define SMU_PM_STATUS_88__DATA_MASK 0xffffffff #define SMU_PM_STATUS_88__DATA__SHIFT 0x0 #define SMU_PM_STATUS_89__DATA_MASK 0xffffffff #define SMU_PM_STATUS_89__DATA__SHIFT 0x0 #define SMU_PM_STATUS_90__DATA_MASK 0xffffffff #define SMU_PM_STATUS_90__DATA__SHIFT 0x0 #define SMU_PM_STATUS_91__DATA_MASK 0xffffffff #define SMU_PM_STATUS_91__DATA__SHIFT 0x0 #define SMU_PM_STATUS_92__DATA_MASK 0xffffffff #define SMU_PM_STATUS_92__DATA__SHIFT 0x0 #define SMU_PM_STATUS_93__DATA_MASK 0xffffffff #define SMU_PM_STATUS_93__DATA__SHIFT 0x0 #define SMU_PM_STATUS_94__DATA_MASK 
0xffffffff #define SMU_PM_STATUS_94__DATA__SHIFT 0x0 #define SMU_PM_STATUS_95__DATA_MASK 0xffffffff #define SMU_PM_STATUS_95__DATA__SHIFT 0x0 #define SMU_PM_STATUS_96__DATA_MASK 0xffffffff #define SMU_PM_STATUS_96__DATA__SHIFT 0x0 #define SMU_PM_STATUS_97__DATA_MASK 0xffffffff #define SMU_PM_STATUS_97__DATA__SHIFT 0x0 #define SMU_PM_STATUS_98__DATA_MASK 0xffffffff #define SMU_PM_STATUS_98__DATA__SHIFT 0x0 #define SMU_PM_STATUS_99__DATA_MASK 0xffffffff #define SMU_PM_STATUS_99__DATA__SHIFT 0x0 #define SMU_PM_STATUS_100__DATA_MASK 0xffffffff #define SMU_PM_STATUS_100__DATA__SHIFT 0x0 #define SMU_PM_STATUS_101__DATA_MASK 0xffffffff #define SMU_PM_STATUS_101__DATA__SHIFT 0x0 #define SMU_PM_STATUS_102__DATA_MASK 0xffffffff #define SMU_PM_STATUS_102__DATA__SHIFT 0x0 #define SMU_PM_STATUS_103__DATA_MASK 0xffffffff #define SMU_PM_STATUS_103__DATA__SHIFT 0x0 #define SMU_PM_STATUS_104__DATA_MASK 0xffffffff #define SMU_PM_STATUS_104__DATA__SHIFT 0x0 #define SMU_PM_STATUS_105__DATA_MASK 0xffffffff #define SMU_PM_STATUS_105__DATA__SHIFT 0x0 #define SMU_PM_STATUS_106__DATA_MASK 0xffffffff #define SMU_PM_STATUS_106__DATA__SHIFT 0x0 #define SMU_PM_STATUS_107__DATA_MASK 0xffffffff #define SMU_PM_STATUS_107__DATA__SHIFT 0x0 #define SMU_PM_STATUS_108__DATA_MASK 0xffffffff #define SMU_PM_STATUS_108__DATA__SHIFT 0x0 #define SMU_PM_STATUS_109__DATA_MASK 0xffffffff #define SMU_PM_STATUS_109__DATA__SHIFT 0x0 #define SMU_PM_STATUS_110__DATA_MASK 0xffffffff #define SMU_PM_STATUS_110__DATA__SHIFT 0x0 #define SMU_PM_STATUS_111__DATA_MASK 0xffffffff #define SMU_PM_STATUS_111__DATA__SHIFT 0x0 #define SMU_PM_STATUS_112__DATA_MASK 0xffffffff #define SMU_PM_STATUS_112__DATA__SHIFT 0x0 #define SMU_PM_STATUS_113__DATA_MASK 0xffffffff #define SMU_PM_STATUS_113__DATA__SHIFT 0x0 #define SMU_PM_STATUS_114__DATA_MASK 0xffffffff #define SMU_PM_STATUS_114__DATA__SHIFT 0x0 #define SMU_PM_STATUS_115__DATA_MASK 0xffffffff #define SMU_PM_STATUS_115__DATA__SHIFT 0x0 #define SMU_PM_STATUS_116__DATA_MASK 0xffffffff #define SMU_PM_STATUS_116__DATA__SHIFT 0x0 #define SMU_PM_STATUS_117__DATA_MASK 0xffffffff #define SMU_PM_STATUS_117__DATA__SHIFT 0x0 #define SMU_PM_STATUS_118__DATA_MASK 0xffffffff #define SMU_PM_STATUS_118__DATA__SHIFT 0x0 #define SMU_PM_STATUS_119__DATA_MASK 0xffffffff #define SMU_PM_STATUS_119__DATA__SHIFT 0x0 #define SMU_PM_STATUS_120__DATA_MASK 0xffffffff #define SMU_PM_STATUS_120__DATA__SHIFT 0x0 #define SMU_PM_STATUS_121__DATA_MASK 0xffffffff #define SMU_PM_STATUS_121__DATA__SHIFT 0x0 #define SMU_PM_STATUS_122__DATA_MASK 0xffffffff #define SMU_PM_STATUS_122__DATA__SHIFT 0x0 #define SMU_PM_STATUS_123__DATA_MASK 0xffffffff #define SMU_PM_STATUS_123__DATA__SHIFT 0x0 #define SMU_PM_STATUS_124__DATA_MASK 0xffffffff #define SMU_PM_STATUS_124__DATA__SHIFT 0x0 #define SMU_PM_STATUS_125__DATA_MASK 0xffffffff #define SMU_PM_STATUS_125__DATA__SHIFT 0x0 #define SMU_PM_STATUS_126__DATA_MASK 0xffffffff #define SMU_PM_STATUS_126__DATA__SHIFT 0x0 #define SMU_PM_STATUS_127__DATA_MASK 0xffffffff #define SMU_PM_STATUS_127__DATA__SHIFT 0x0 #define CG_THERMAL_INT_ENA__THERM_INTH_SET_MASK 0x1 #define CG_THERMAL_INT_ENA__THERM_INTH_SET__SHIFT 0x0 #define CG_THERMAL_INT_ENA__THERM_INTL_SET_MASK 0x2 #define CG_THERMAL_INT_ENA__THERM_INTL_SET__SHIFT 0x1 #define CG_THERMAL_INT_ENA__THERM_TRIGGER_SET_MASK 0x4 #define CG_THERMAL_INT_ENA__THERM_TRIGGER_SET__SHIFT 0x2 #define CG_THERMAL_INT_ENA__THERM_INTH_CLR_MASK 0x8 #define CG_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT 0x3 #define CG_THERMAL_INT_ENA__THERM_INTL_CLR_MASK 0x10 #define 
CG_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT 0x4 #define CG_THERMAL_INT_ENA__THERM_TRIGGER_CLR_MASK 0x20 #define CG_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT 0x5 #define CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK 0xff #define CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT 0x0 #define CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK 0xff00 #define CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT 0x8 #define CG_THERMAL_INT_CTRL__GNB_TEMP_THRESHOLD_MASK 0xff0000 #define CG_THERMAL_INT_CTRL__GNB_TEMP_THRESHOLD__SHIFT 0x10 #define CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK 0x1000000 #define CG_THERMAL_INT_CTRL__THERM_INTH_MASK__SHIFT 0x18 #define CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK 0x2000000 #define CG_THERMAL_INT_CTRL__THERM_INTL_MASK__SHIFT 0x19 #define CG_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK 0x4000000 #define CG_THERMAL_INT_CTRL__THERM_TRIGGER_MASK__SHIFT 0x1a #define CG_THERMAL_INT_CTRL__THERM_TRIGGER_CNB_MASK_MASK 0x8000000 #define CG_THERMAL_INT_CTRL__THERM_TRIGGER_CNB_MASK__SHIFT 0x1b #define CG_THERMAL_INT_CTRL__THERM_GNB_HW_ENA_MASK 0x10000000 #define CG_THERMAL_INT_CTRL__THERM_GNB_HW_ENA__SHIFT 0x1c #define CG_THERMAL_INT_STATUS__THERM_INTH_DETECT_MASK 0x1 #define CG_THERMAL_INT_STATUS__THERM_INTH_DETECT__SHIFT 0x0 #define CG_THERMAL_INT_STATUS__THERM_INTL_DETECT_MASK 0x2 #define CG_THERMAL_INT_STATUS__THERM_INTL_DETECT__SHIFT 0x1 #define CG_THERMAL_INT_STATUS__THERM_TRIGGER_DETECT_MASK 0x4 #define CG_THERMAL_INT_STATUS__THERM_TRIGGER_DETECT__SHIFT 0x2 #define CG_THERMAL_INT_STATUS__THERM_TRIGGER_CNB_DETECT_MASK 0x8 #define CG_THERMAL_INT_STATUS__THERM_TRIGGER_CNB_DETECT__SHIFT 0x3 #define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK 0x1 #define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN__SHIFT 0x0 #define GENERAL_PWRMGT__STATIC_PM_EN_MASK 0x2 #define GENERAL_PWRMGT__STATIC_PM_EN__SHIFT 0x1 #define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK 0x4 #define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS__SHIFT 0x2 #define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE_MASK 0x8 #define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE__SHIFT 0x3 #define GENERAL_PWRMGT__SW_SMIO_INDEX_MASK 0x40 #define GENERAL_PWRMGT__SW_SMIO_INDEX__SHIFT 0x6 #define GENERAL_PWRMGT__LOW_VOLT_D2_ACPI_MASK 0x100 #define GENERAL_PWRMGT__LOW_VOLT_D2_ACPI__SHIFT 0x8 #define GENERAL_PWRMGT__LOW_VOLT_D3_ACPI_MASK 0x200 #define GENERAL_PWRMGT__LOW_VOLT_D3_ACPI__SHIFT 0x9 #define GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK 0x400 #define GENERAL_PWRMGT__VOLT_PWRMGT_EN__SHIFT 0xa #define GENERAL_PWRMGT__SPARE11_MASK 0x800 #define GENERAL_PWRMGT__SPARE11__SHIFT 0xb #define GENERAL_PWRMGT__GPU_COUNTER_ACPI_MASK 0x4000 #define GENERAL_PWRMGT__GPU_COUNTER_ACPI__SHIFT 0xe #define GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK 0x8000 #define GENERAL_PWRMGT__GPU_COUNTER_CLK__SHIFT 0xf #define GENERAL_PWRMGT__GPU_COUNTER_OFF_MASK 0x10000 #define GENERAL_PWRMGT__GPU_COUNTER_OFF__SHIFT 0x10 #define GENERAL_PWRMGT__GPU_COUNTER_INTF_OFF_MASK 0x20000 #define GENERAL_PWRMGT__GPU_COUNTER_INTF_OFF__SHIFT 0x11 #define GENERAL_PWRMGT__SPARE18_MASK 0x40000 #define GENERAL_PWRMGT__SPARE18__SHIFT 0x12 #define GENERAL_PWRMGT__ACPI_D3_VID_MASK 0x180000 #define GENERAL_PWRMGT__ACPI_D3_VID__SHIFT 0x13 #define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK 0x800000 #define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN__SHIFT 0x17 #define GENERAL_PWRMGT__SPARE27_MASK 0x8000000 #define GENERAL_PWRMGT__SPARE27__SHIFT 0x1b #define GENERAL_PWRMGT__SPARE_MASK 0xf0000000 #define GENERAL_PWRMGT__SPARE__SHIFT 0x1c #define CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK 0x3 #define CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT 0x0 #define 
CNB_PWRMGT_CNTL__GNB_SLOW_MASK 0x4 #define CNB_PWRMGT_CNTL__GNB_SLOW__SHIFT 0x2 #define CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK 0x8 #define CNB_PWRMGT_CNTL__FORCE_NB_PS1__SHIFT 0x3 #define CNB_PWRMGT_CNTL__DPM_ENABLED_MASK 0x10 #define CNB_PWRMGT_CNTL__DPM_ENABLED__SHIFT 0x4 #define CNB_PWRMGT_CNTL__SPARE_MASK 0xffffffe0 #define CNB_PWRMGT_CNTL__SPARE__SHIFT 0x5 #define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK 0x1 #define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF__SHIFT 0x0 #define SCLK_PWRMGT_CNTL__SCLK_LOW_D1_MASK 0x2 #define SCLK_PWRMGT_CNTL__SCLK_LOW_D1__SHIFT 0x1 #define SCLK_PWRMGT_CNTL__DYN_PWR_DOWN_EN_MASK 0x4 #define SCLK_PWRMGT_CNTL__DYN_PWR_DOWN_EN__SHIFT 0x2 #define SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK 0x10 #define SCLK_PWRMGT_CNTL__RESET_BUSY_CNT__SHIFT 0x4 #define SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK 0x20 #define SCLK_PWRMGT_CNTL__RESET_SCLK_CNT__SHIFT 0x5 #define SCLK_PWRMGT_CNTL__RESERVED_0_MASK 0x40 #define SCLK_PWRMGT_CNTL__RESERVED_0__SHIFT 0x6 #define SCLK_PWRMGT_CNTL__DYN_GFX_CLK_OFF_EN_MASK 0x80 #define SCLK_PWRMGT_CNTL__DYN_GFX_CLK_OFF_EN__SHIFT 0x7 #define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_ON_MASK 0x100 #define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_ON__SHIFT 0x8 #define SCLK_PWRMGT_CNTL__GFX_CLK_REQUEST_OFF_MASK 0x200 #define SCLK_PWRMGT_CNTL__GFX_CLK_REQUEST_OFF__SHIFT 0x9 #define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_OFF_MASK 0x400 #define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_OFF__SHIFT 0xa #define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D1_MASK 0x800 #define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D1__SHIFT 0xb #define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D2_MASK 0x1000 #define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D2__SHIFT 0xc #define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D3_MASK 0x2000 #define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D3__SHIFT 0xd #define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN_MASK 0x4000 #define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN__SHIFT 0xe #define SCLK_PWRMGT_CNTL__AUTO_SCLK_PULSE_SKIP_MASK 0x8000 #define SCLK_PWRMGT_CNTL__AUTO_SCLK_PULSE_SKIP__SHIFT 0xf #define SCLK_PWRMGT_CNTL__LIGHT_SLEEP_COUNTER_MASK 0x1f0000 #define SCLK_PWRMGT_CNTL__LIGHT_SLEEP_COUNTER__SHIFT 0x10 #define SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK 0x200000 #define SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN__SHIFT 0x15 #define SCLK_PWRMGT_CNTL__DPM_DYN_PWR_DOWN_CNTL_MASK 0x400000 #define SCLK_PWRMGT_CNTL__DPM_DYN_PWR_DOWN_CNTL__SHIFT 0x16 #define SCLK_PWRMGT_CNTL__DPM_DYN_PWR_DOWN_EN_MASK 0x800000 #define SCLK_PWRMGT_CNTL__DPM_DYN_PWR_DOWN_EN__SHIFT 0x17 #define SCLK_PWRMGT_CNTL__RESERVED_3_MASK 0x1000000 #define SCLK_PWRMGT_CNTL__RESERVED_3__SHIFT 0x18 #define SCLK_PWRMGT_CNTL__VOLTAGE_UPDATE_EN_MASK 0x2000000 #define SCLK_PWRMGT_CNTL__VOLTAGE_UPDATE_EN__SHIFT 0x19 #define SCLK_PWRMGT_CNTL__FORCE_PM0_INTERRUPT_MASK 0x10000000 #define SCLK_PWRMGT_CNTL__FORCE_PM0_INTERRUPT__SHIFT 0x1c #define SCLK_PWRMGT_CNTL__FORCE_PM1_INTERRUPT_MASK 0x20000000 #define SCLK_PWRMGT_CNTL__FORCE_PM1_INTERRUPT__SHIFT 0x1d #define SCLK_PWRMGT_CNTL__GFX_VOLTAGE_CHANGE_EN_MASK 0x40000000 #define SCLK_PWRMGT_CNTL__GFX_VOLTAGE_CHANGE_EN__SHIFT 0x1e #define SCLK_PWRMGT_CNTL__GFX_VOLTAGE_CHANGE_MODE_MASK 0x80000000 #define SCLK_PWRMGT_CNTL__GFX_VOLTAGE_CHANGE_MODE__SHIFT 0x1f #define TARGET_AND_CURRENT_PROFILE_INDEX__TARGET_STATE_MASK 0xf #define TARGET_AND_CURRENT_PROFILE_INDEX__TARGET_STATE__SHIFT 0x0 #define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_MASK 0xf0 #define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE__SHIFT 0x4 #define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK 0xf00 #define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT 0x8 
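/*
 * Illustrative sketch only: single-bit status fields such as the thermal
 * interrupt detect flags defined above are usually tested directly against
 * their masks.  The helper name is hypothetical; the mask macro is the
 * CG_THERMAL_INT_STATUS definition from this header.
 */
static inline int therm_inth_pending(unsigned int int_status)
{
	/* Non-zero when the high-temperature interrupt has been latched. */
	return (int_status & CG_THERMAL_INT_STATUS__THERM_INTH_DETECT_MASK) != 0;
}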
#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_MCLK_INDEX_MASK 0xf000 #define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_MCLK_INDEX__SHIFT 0xc #define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK 0x1f0000 #define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT 0x10 #define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_SCLK_INDEX_MASK 0x3e00000 #define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_SCLK_INDEX__SHIFT 0x15 #define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_LCLK_INDEX_MASK 0x1c000000 #define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_LCLK_INDEX__SHIFT 0x1a #define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_LCLK_INDEX_MASK 0xe0000000 #define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_LCLK_INDEX__SHIFT 0x1d #define CG_FREQ_TRAN_VOTING_0__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 #define CG_FREQ_TRAN_VOTING_0__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 #define CG_FREQ_TRAN_VOTING_0__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 #define CG_FREQ_TRAN_VOTING_0__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 #define CG_FREQ_TRAN_VOTING_0__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 #define CG_FREQ_TRAN_VOTING_0__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 #define CG_FREQ_TRAN_VOTING_0__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 #define CG_FREQ_TRAN_VOTING_0__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 #define CG_FREQ_TRAN_VOTING_0__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 #define CG_FREQ_TRAN_VOTING_0__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 #define CG_FREQ_TRAN_VOTING_0__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 #define CG_FREQ_TRAN_VOTING_0__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 #define CG_FREQ_TRAN_VOTING_0__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 #define CG_FREQ_TRAN_VOTING_0__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 #define CG_FREQ_TRAN_VOTING_0__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 #define CG_FREQ_TRAN_VOTING_0__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 #define CG_FREQ_TRAN_VOTING_0__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 #define CG_FREQ_TRAN_VOTING_0__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 #define CG_FREQ_TRAN_VOTING_0__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 #define CG_FREQ_TRAN_VOTING_0__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 #define CG_FREQ_TRAN_VOTING_0__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 #define CG_FREQ_TRAN_VOTING_0__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa #define CG_FREQ_TRAN_VOTING_0__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 #define CG_FREQ_TRAN_VOTING_0__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb #define CG_FREQ_TRAN_VOTING_0__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 #define CG_FREQ_TRAN_VOTING_0__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc #define CG_FREQ_TRAN_VOTING_0__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 #define CG_FREQ_TRAN_VOTING_0__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd #define CG_FREQ_TRAN_VOTING_0__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 #define CG_FREQ_TRAN_VOTING_0__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe #define CG_FREQ_TRAN_VOTING_0__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 #define CG_FREQ_TRAN_VOTING_0__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf #define CG_FREQ_TRAN_VOTING_0__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 #define CG_FREQ_TRAN_VOTING_0__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 #define CG_FREQ_TRAN_VOTING_0__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 #define CG_FREQ_TRAN_VOTING_0__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 #define CG_FREQ_TRAN_VOTING_0__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 #define CG_FREQ_TRAN_VOTING_0__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 #define CG_FREQ_TRAN_VOTING_0__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 #define 
CG_FREQ_TRAN_VOTING_0__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 #define CG_FREQ_TRAN_VOTING_0__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 #define CG_FREQ_TRAN_VOTING_0__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 #define CG_FREQ_TRAN_VOTING_0__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 #define CG_FREQ_TRAN_VOTING_0__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 #define CG_FREQ_TRAN_VOTING_0__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 #define CG_FREQ_TRAN_VOTING_0__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 #define CG_FREQ_TRAN_VOTING_0__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 #define CG_FREQ_TRAN_VOTING_0__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 #define CG_FREQ_TRAN_VOTING_0__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 #define CG_FREQ_TRAN_VOTING_0__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 #define CG_FREQ_TRAN_VOTING_0__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 #define CG_FREQ_TRAN_VOTING_0__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 #define CG_FREQ_TRAN_VOTING_0__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 #define CG_FREQ_TRAN_VOTING_0__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a #define CG_FREQ_TRAN_VOTING_0__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 #define CG_FREQ_TRAN_VOTING_0__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b #define CG_FREQ_TRAN_VOTING_0__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 #define CG_FREQ_TRAN_VOTING_0__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c #define CG_FREQ_TRAN_VOTING_0__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 #define CG_FREQ_TRAN_VOTING_0__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d #define CG_FREQ_TRAN_VOTING_0__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 #define CG_FREQ_TRAN_VOTING_0__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e #define CG_FREQ_TRAN_VOTING_1__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 #define CG_FREQ_TRAN_VOTING_1__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 #define CG_FREQ_TRAN_VOTING_1__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 #define CG_FREQ_TRAN_VOTING_1__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 #define CG_FREQ_TRAN_VOTING_1__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 #define CG_FREQ_TRAN_VOTING_1__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 #define CG_FREQ_TRAN_VOTING_1__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 #define CG_FREQ_TRAN_VOTING_1__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 #define CG_FREQ_TRAN_VOTING_1__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 #define CG_FREQ_TRAN_VOTING_1__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 #define CG_FREQ_TRAN_VOTING_1__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 #define CG_FREQ_TRAN_VOTING_1__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 #define CG_FREQ_TRAN_VOTING_1__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 #define CG_FREQ_TRAN_VOTING_1__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 #define CG_FREQ_TRAN_VOTING_1__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 #define CG_FREQ_TRAN_VOTING_1__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 #define CG_FREQ_TRAN_VOTING_1__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 #define CG_FREQ_TRAN_VOTING_1__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 #define CG_FREQ_TRAN_VOTING_1__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 #define CG_FREQ_TRAN_VOTING_1__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 #define CG_FREQ_TRAN_VOTING_1__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 #define CG_FREQ_TRAN_VOTING_1__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa #define CG_FREQ_TRAN_VOTING_1__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 #define CG_FREQ_TRAN_VOTING_1__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb #define CG_FREQ_TRAN_VOTING_1__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 #define CG_FREQ_TRAN_VOTING_1__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 
0xc #define CG_FREQ_TRAN_VOTING_1__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 #define CG_FREQ_TRAN_VOTING_1__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd #define CG_FREQ_TRAN_VOTING_1__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 #define CG_FREQ_TRAN_VOTING_1__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe #define CG_FREQ_TRAN_VOTING_1__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 #define CG_FREQ_TRAN_VOTING_1__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf #define CG_FREQ_TRAN_VOTING_1__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 #define CG_FREQ_TRAN_VOTING_1__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 #define CG_FREQ_TRAN_VOTING_1__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 #define CG_FREQ_TRAN_VOTING_1__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 #define CG_FREQ_TRAN_VOTING_1__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 #define CG_FREQ_TRAN_VOTING_1__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 #define CG_FREQ_TRAN_VOTING_1__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 #define CG_FREQ_TRAN_VOTING_1__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 #define CG_FREQ_TRAN_VOTING_1__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 #define CG_FREQ_TRAN_VOTING_1__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 #define CG_FREQ_TRAN_VOTING_1__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 #define CG_FREQ_TRAN_VOTING_1__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 #define CG_FREQ_TRAN_VOTING_1__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 #define CG_FREQ_TRAN_VOTING_1__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 #define CG_FREQ_TRAN_VOTING_1__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 #define CG_FREQ_TRAN_VOTING_1__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 #define CG_FREQ_TRAN_VOTING_1__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 #define CG_FREQ_TRAN_VOTING_1__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 #define CG_FREQ_TRAN_VOTING_1__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 #define CG_FREQ_TRAN_VOTING_1__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 #define CG_FREQ_TRAN_VOTING_1__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 #define CG_FREQ_TRAN_VOTING_1__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a #define CG_FREQ_TRAN_VOTING_1__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 #define CG_FREQ_TRAN_VOTING_1__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b #define CG_FREQ_TRAN_VOTING_1__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 #define CG_FREQ_TRAN_VOTING_1__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c #define CG_FREQ_TRAN_VOTING_1__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 #define CG_FREQ_TRAN_VOTING_1__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d #define CG_FREQ_TRAN_VOTING_1__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 #define CG_FREQ_TRAN_VOTING_1__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e #define CG_FREQ_TRAN_VOTING_2__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 #define CG_FREQ_TRAN_VOTING_2__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 #define CG_FREQ_TRAN_VOTING_2__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 #define CG_FREQ_TRAN_VOTING_2__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 #define CG_FREQ_TRAN_VOTING_2__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 #define CG_FREQ_TRAN_VOTING_2__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 #define CG_FREQ_TRAN_VOTING_2__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 #define CG_FREQ_TRAN_VOTING_2__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 #define CG_FREQ_TRAN_VOTING_2__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 #define CG_FREQ_TRAN_VOTING_2__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 #define CG_FREQ_TRAN_VOTING_2__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 #define CG_FREQ_TRAN_VOTING_2__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 #define 
CG_FREQ_TRAN_VOTING_2__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 #define CG_FREQ_TRAN_VOTING_2__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 #define CG_FREQ_TRAN_VOTING_2__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 #define CG_FREQ_TRAN_VOTING_2__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 #define CG_FREQ_TRAN_VOTING_2__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 #define CG_FREQ_TRAN_VOTING_2__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 #define CG_FREQ_TRAN_VOTING_2__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 #define CG_FREQ_TRAN_VOTING_2__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 #define CG_FREQ_TRAN_VOTING_2__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 #define CG_FREQ_TRAN_VOTING_2__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa #define CG_FREQ_TRAN_VOTING_2__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 #define CG_FREQ_TRAN_VOTING_2__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb #define CG_FREQ_TRAN_VOTING_2__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 #define CG_FREQ_TRAN_VOTING_2__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc #define CG_FREQ_TRAN_VOTING_2__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 #define CG_FREQ_TRAN_VOTING_2__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd #define CG_FREQ_TRAN_VOTING_2__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 #define CG_FREQ_TRAN_VOTING_2__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe #define CG_FREQ_TRAN_VOTING_2__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 #define CG_FREQ_TRAN_VOTING_2__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf #define CG_FREQ_TRAN_VOTING_2__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 #define CG_FREQ_TRAN_VOTING_2__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 #define CG_FREQ_TRAN_VOTING_2__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 #define CG_FREQ_TRAN_VOTING_2__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 #define CG_FREQ_TRAN_VOTING_2__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 #define CG_FREQ_TRAN_VOTING_2__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 #define CG_FREQ_TRAN_VOTING_2__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 #define CG_FREQ_TRAN_VOTING_2__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 #define CG_FREQ_TRAN_VOTING_2__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 #define CG_FREQ_TRAN_VOTING_2__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 #define CG_FREQ_TRAN_VOTING_2__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 #define CG_FREQ_TRAN_VOTING_2__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 #define CG_FREQ_TRAN_VOTING_2__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 #define CG_FREQ_TRAN_VOTING_2__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 #define CG_FREQ_TRAN_VOTING_2__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 #define CG_FREQ_TRAN_VOTING_2__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 #define CG_FREQ_TRAN_VOTING_2__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 #define CG_FREQ_TRAN_VOTING_2__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 #define CG_FREQ_TRAN_VOTING_2__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 #define CG_FREQ_TRAN_VOTING_2__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 #define CG_FREQ_TRAN_VOTING_2__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 #define CG_FREQ_TRAN_VOTING_2__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a #define CG_FREQ_TRAN_VOTING_2__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 #define CG_FREQ_TRAN_VOTING_2__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b #define CG_FREQ_TRAN_VOTING_2__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 #define CG_FREQ_TRAN_VOTING_2__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c #define CG_FREQ_TRAN_VOTING_2__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 #define CG_FREQ_TRAN_VOTING_2__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d #define 
CG_FREQ_TRAN_VOTING_2__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 #define CG_FREQ_TRAN_VOTING_2__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e #define CG_FREQ_TRAN_VOTING_3__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 #define CG_FREQ_TRAN_VOTING_3__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 #define CG_FREQ_TRAN_VOTING_3__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 #define CG_FREQ_TRAN_VOTING_3__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 #define CG_FREQ_TRAN_VOTING_3__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 #define CG_FREQ_TRAN_VOTING_3__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 #define CG_FREQ_TRAN_VOTING_3__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 #define CG_FREQ_TRAN_VOTING_3__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 #define CG_FREQ_TRAN_VOTING_3__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 #define CG_FREQ_TRAN_VOTING_3__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 #define CG_FREQ_TRAN_VOTING_3__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 #define CG_FREQ_TRAN_VOTING_3__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 #define CG_FREQ_TRAN_VOTING_3__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 #define CG_FREQ_TRAN_VOTING_3__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 #define CG_FREQ_TRAN_VOTING_3__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 #define CG_FREQ_TRAN_VOTING_3__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 #define CG_FREQ_TRAN_VOTING_3__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 #define CG_FREQ_TRAN_VOTING_3__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 #define CG_FREQ_TRAN_VOTING_3__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 #define CG_FREQ_TRAN_VOTING_3__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 #define CG_FREQ_TRAN_VOTING_3__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 #define CG_FREQ_TRAN_VOTING_3__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa #define CG_FREQ_TRAN_VOTING_3__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 #define CG_FREQ_TRAN_VOTING_3__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb #define CG_FREQ_TRAN_VOTING_3__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 #define CG_FREQ_TRAN_VOTING_3__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc #define CG_FREQ_TRAN_VOTING_3__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 #define CG_FREQ_TRAN_VOTING_3__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd #define CG_FREQ_TRAN_VOTING_3__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 #define CG_FREQ_TRAN_VOTING_3__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe #define CG_FREQ_TRAN_VOTING_3__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 #define CG_FREQ_TRAN_VOTING_3__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf #define CG_FREQ_TRAN_VOTING_3__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 #define CG_FREQ_TRAN_VOTING_3__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 #define CG_FREQ_TRAN_VOTING_3__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 #define CG_FREQ_TRAN_VOTING_3__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 #define CG_FREQ_TRAN_VOTING_3__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 #define CG_FREQ_TRAN_VOTING_3__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 #define CG_FREQ_TRAN_VOTING_3__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 #define CG_FREQ_TRAN_VOTING_3__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 #define CG_FREQ_TRAN_VOTING_3__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 #define CG_FREQ_TRAN_VOTING_3__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 #define CG_FREQ_TRAN_VOTING_3__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 #define CG_FREQ_TRAN_VOTING_3__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 #define CG_FREQ_TRAN_VOTING_3__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 #define CG_FREQ_TRAN_VOTING_3__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 #define CG_FREQ_TRAN_VOTING_3__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 #define 
CG_FREQ_TRAN_VOTING_3__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 #define CG_FREQ_TRAN_VOTING_3__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 #define CG_FREQ_TRAN_VOTING_3__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 #define CG_FREQ_TRAN_VOTING_3__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 #define CG_FREQ_TRAN_VOTING_3__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 #define CG_FREQ_TRAN_VOTING_3__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 #define CG_FREQ_TRAN_VOTING_3__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a #define CG_FREQ_TRAN_VOTING_3__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 #define CG_FREQ_TRAN_VOTING_3__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b #define CG_FREQ_TRAN_VOTING_3__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 #define CG_FREQ_TRAN_VOTING_3__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c #define CG_FREQ_TRAN_VOTING_3__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 #define CG_FREQ_TRAN_VOTING_3__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d #define CG_FREQ_TRAN_VOTING_3__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 #define CG_FREQ_TRAN_VOTING_3__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e #define CG_FREQ_TRAN_VOTING_4__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 #define CG_FREQ_TRAN_VOTING_4__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 #define CG_FREQ_TRAN_VOTING_4__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 #define CG_FREQ_TRAN_VOTING_4__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 #define CG_FREQ_TRAN_VOTING_4__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 #define CG_FREQ_TRAN_VOTING_4__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 #define CG_FREQ_TRAN_VOTING_4__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 #define CG_FREQ_TRAN_VOTING_4__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 #define CG_FREQ_TRAN_VOTING_4__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 #define CG_FREQ_TRAN_VOTING_4__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 #define CG_FREQ_TRAN_VOTING_4__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 #define CG_FREQ_TRAN_VOTING_4__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 #define CG_FREQ_TRAN_VOTING_4__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 #define CG_FREQ_TRAN_VOTING_4__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 #define CG_FREQ_TRAN_VOTING_4__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 #define CG_FREQ_TRAN_VOTING_4__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 #define CG_FREQ_TRAN_VOTING_4__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 #define CG_FREQ_TRAN_VOTING_4__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 #define CG_FREQ_TRAN_VOTING_4__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 #define CG_FREQ_TRAN_VOTING_4__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 #define CG_FREQ_TRAN_VOTING_4__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 #define CG_FREQ_TRAN_VOTING_4__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa #define CG_FREQ_TRAN_VOTING_4__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 #define CG_FREQ_TRAN_VOTING_4__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb #define CG_FREQ_TRAN_VOTING_4__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 #define CG_FREQ_TRAN_VOTING_4__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc #define CG_FREQ_TRAN_VOTING_4__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 #define CG_FREQ_TRAN_VOTING_4__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd #define CG_FREQ_TRAN_VOTING_4__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 #define CG_FREQ_TRAN_VOTING_4__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe #define CG_FREQ_TRAN_VOTING_4__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 #define CG_FREQ_TRAN_VOTING_4__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf #define CG_FREQ_TRAN_VOTING_4__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 #define CG_FREQ_TRAN_VOTING_4__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 #define 
CG_FREQ_TRAN_VOTING_4__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 #define CG_FREQ_TRAN_VOTING_4__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 #define CG_FREQ_TRAN_VOTING_4__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 #define CG_FREQ_TRAN_VOTING_4__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 #define CG_FREQ_TRAN_VOTING_4__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 #define CG_FREQ_TRAN_VOTING_4__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 #define CG_FREQ_TRAN_VOTING_4__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 #define CG_FREQ_TRAN_VOTING_4__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 #define CG_FREQ_TRAN_VOTING_4__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 #define CG_FREQ_TRAN_VOTING_4__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 #define CG_FREQ_TRAN_VOTING_4__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 #define CG_FREQ_TRAN_VOTING_4__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 #define CG_FREQ_TRAN_VOTING_4__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 #define CG_FREQ_TRAN_VOTING_4__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 #define CG_FREQ_TRAN_VOTING_4__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 #define CG_FREQ_TRAN_VOTING_4__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 #define CG_FREQ_TRAN_VOTING_4__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 #define CG_FREQ_TRAN_VOTING_4__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 #define CG_FREQ_TRAN_VOTING_4__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 #define CG_FREQ_TRAN_VOTING_4__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a #define CG_FREQ_TRAN_VOTING_4__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 #define CG_FREQ_TRAN_VOTING_4__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b #define CG_FREQ_TRAN_VOTING_4__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 #define CG_FREQ_TRAN_VOTING_4__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c #define CG_FREQ_TRAN_VOTING_4__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 #define CG_FREQ_TRAN_VOTING_4__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d #define CG_FREQ_TRAN_VOTING_4__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 #define CG_FREQ_TRAN_VOTING_4__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e #define CG_FREQ_TRAN_VOTING_5__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 #define CG_FREQ_TRAN_VOTING_5__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 #define CG_FREQ_TRAN_VOTING_5__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 #define CG_FREQ_TRAN_VOTING_5__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 #define CG_FREQ_TRAN_VOTING_5__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 #define CG_FREQ_TRAN_VOTING_5__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 #define CG_FREQ_TRAN_VOTING_5__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 #define CG_FREQ_TRAN_VOTING_5__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 #define CG_FREQ_TRAN_VOTING_5__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 #define CG_FREQ_TRAN_VOTING_5__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 #define CG_FREQ_TRAN_VOTING_5__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 #define CG_FREQ_TRAN_VOTING_5__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 #define CG_FREQ_TRAN_VOTING_5__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 #define CG_FREQ_TRAN_VOTING_5__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 #define CG_FREQ_TRAN_VOTING_5__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 #define CG_FREQ_TRAN_VOTING_5__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 #define CG_FREQ_TRAN_VOTING_5__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 #define CG_FREQ_TRAN_VOTING_5__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 #define CG_FREQ_TRAN_VOTING_5__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 #define CG_FREQ_TRAN_VOTING_5__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 #define 
CG_FREQ_TRAN_VOTING_5__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 #define CG_FREQ_TRAN_VOTING_5__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa #define CG_FREQ_TRAN_VOTING_5__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 #define CG_FREQ_TRAN_VOTING_5__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb #define CG_FREQ_TRAN_VOTING_5__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 #define CG_FREQ_TRAN_VOTING_5__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc #define CG_FREQ_TRAN_VOTING_5__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 #define CG_FREQ_TRAN_VOTING_5__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd #define CG_FREQ_TRAN_VOTING_5__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 #define CG_FREQ_TRAN_VOTING_5__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe #define CG_FREQ_TRAN_VOTING_5__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 #define CG_FREQ_TRAN_VOTING_5__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf #define CG_FREQ_TRAN_VOTING_5__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 #define CG_FREQ_TRAN_VOTING_5__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 #define CG_FREQ_TRAN_VOTING_5__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 #define CG_FREQ_TRAN_VOTING_5__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 #define CG_FREQ_TRAN_VOTING_5__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 #define CG_FREQ_TRAN_VOTING_5__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 #define CG_FREQ_TRAN_VOTING_5__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 #define CG_FREQ_TRAN_VOTING_5__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 #define CG_FREQ_TRAN_VOTING_5__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 #define CG_FREQ_TRAN_VOTING_5__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 #define CG_FREQ_TRAN_VOTING_5__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 #define CG_FREQ_TRAN_VOTING_5__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 #define CG_FREQ_TRAN_VOTING_5__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 #define CG_FREQ_TRAN_VOTING_5__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 #define CG_FREQ_TRAN_VOTING_5__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 #define CG_FREQ_TRAN_VOTING_5__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 #define CG_FREQ_TRAN_VOTING_5__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 #define CG_FREQ_TRAN_VOTING_5__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 #define CG_FREQ_TRAN_VOTING_5__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 #define CG_FREQ_TRAN_VOTING_5__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 #define CG_FREQ_TRAN_VOTING_5__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 #define CG_FREQ_TRAN_VOTING_5__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a #define CG_FREQ_TRAN_VOTING_5__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 #define CG_FREQ_TRAN_VOTING_5__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b #define CG_FREQ_TRAN_VOTING_5__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 #define CG_FREQ_TRAN_VOTING_5__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c #define CG_FREQ_TRAN_VOTING_5__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 #define CG_FREQ_TRAN_VOTING_5__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d #define CG_FREQ_TRAN_VOTING_5__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 #define CG_FREQ_TRAN_VOTING_5__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e #define CG_FREQ_TRAN_VOTING_6__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 #define CG_FREQ_TRAN_VOTING_6__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 #define CG_FREQ_TRAN_VOTING_6__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 #define CG_FREQ_TRAN_VOTING_6__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 #define CG_FREQ_TRAN_VOTING_6__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 #define CG_FREQ_TRAN_VOTING_6__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 #define 
CG_FREQ_TRAN_VOTING_6__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 #define CG_FREQ_TRAN_VOTING_6__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 #define CG_FREQ_TRAN_VOTING_6__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 #define CG_FREQ_TRAN_VOTING_6__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 #define CG_FREQ_TRAN_VOTING_6__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 #define CG_FREQ_TRAN_VOTING_6__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 #define CG_FREQ_TRAN_VOTING_6__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 #define CG_FREQ_TRAN_VOTING_6__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 #define CG_FREQ_TRAN_VOTING_6__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 #define CG_FREQ_TRAN_VOTING_6__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 #define CG_FREQ_TRAN_VOTING_6__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 #define CG_FREQ_TRAN_VOTING_6__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 #define CG_FREQ_TRAN_VOTING_6__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 #define CG_FREQ_TRAN_VOTING_6__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 #define CG_FREQ_TRAN_VOTING_6__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 #define CG_FREQ_TRAN_VOTING_6__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa #define CG_FREQ_TRAN_VOTING_6__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 #define CG_FREQ_TRAN_VOTING_6__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb #define CG_FREQ_TRAN_VOTING_6__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 #define CG_FREQ_TRAN_VOTING_6__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc #define CG_FREQ_TRAN_VOTING_6__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 #define CG_FREQ_TRAN_VOTING_6__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd #define CG_FREQ_TRAN_VOTING_6__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 #define CG_FREQ_TRAN_VOTING_6__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe #define CG_FREQ_TRAN_VOTING_6__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 #define CG_FREQ_TRAN_VOTING_6__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf #define CG_FREQ_TRAN_VOTING_6__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 #define CG_FREQ_TRAN_VOTING_6__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 #define CG_FREQ_TRAN_VOTING_6__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 #define CG_FREQ_TRAN_VOTING_6__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 #define CG_FREQ_TRAN_VOTING_6__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 #define CG_FREQ_TRAN_VOTING_6__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 #define CG_FREQ_TRAN_VOTING_6__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 #define CG_FREQ_TRAN_VOTING_6__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 #define CG_FREQ_TRAN_VOTING_6__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 #define CG_FREQ_TRAN_VOTING_6__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 #define CG_FREQ_TRAN_VOTING_6__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 #define CG_FREQ_TRAN_VOTING_6__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 #define CG_FREQ_TRAN_VOTING_6__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 #define CG_FREQ_TRAN_VOTING_6__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 #define CG_FREQ_TRAN_VOTING_6__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 #define CG_FREQ_TRAN_VOTING_6__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 #define CG_FREQ_TRAN_VOTING_6__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 #define CG_FREQ_TRAN_VOTING_6__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 #define CG_FREQ_TRAN_VOTING_6__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 #define CG_FREQ_TRAN_VOTING_6__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 #define CG_FREQ_TRAN_VOTING_6__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 #define CG_FREQ_TRAN_VOTING_6__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a #define 
CG_FREQ_TRAN_VOTING_6__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 #define CG_FREQ_TRAN_VOTING_6__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b #define CG_FREQ_TRAN_VOTING_6__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 #define CG_FREQ_TRAN_VOTING_6__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c #define CG_FREQ_TRAN_VOTING_6__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 #define CG_FREQ_TRAN_VOTING_6__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d #define CG_FREQ_TRAN_VOTING_6__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 #define CG_FREQ_TRAN_VOTING_6__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e #define CG_FREQ_TRAN_VOTING_7__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 #define CG_FREQ_TRAN_VOTING_7__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 #define CG_FREQ_TRAN_VOTING_7__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 #define CG_FREQ_TRAN_VOTING_7__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 #define CG_FREQ_TRAN_VOTING_7__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 #define CG_FREQ_TRAN_VOTING_7__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 #define CG_FREQ_TRAN_VOTING_7__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 #define CG_FREQ_TRAN_VOTING_7__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 #define CG_FREQ_TRAN_VOTING_7__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 #define CG_FREQ_TRAN_VOTING_7__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 #define CG_FREQ_TRAN_VOTING_7__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 #define CG_FREQ_TRAN_VOTING_7__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 #define CG_FREQ_TRAN_VOTING_7__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 #define CG_FREQ_TRAN_VOTING_7__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 #define CG_FREQ_TRAN_VOTING_7__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 #define CG_FREQ_TRAN_VOTING_7__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 #define CG_FREQ_TRAN_VOTING_7__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 #define CG_FREQ_TRAN_VOTING_7__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 #define CG_FREQ_TRAN_VOTING_7__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 #define CG_FREQ_TRAN_VOTING_7__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 #define CG_FREQ_TRAN_VOTING_7__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 #define CG_FREQ_TRAN_VOTING_7__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa #define CG_FREQ_TRAN_VOTING_7__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 #define CG_FREQ_TRAN_VOTING_7__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb #define CG_FREQ_TRAN_VOTING_7__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 #define CG_FREQ_TRAN_VOTING_7__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc #define CG_FREQ_TRAN_VOTING_7__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 #define CG_FREQ_TRAN_VOTING_7__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd #define CG_FREQ_TRAN_VOTING_7__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 #define CG_FREQ_TRAN_VOTING_7__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe #define CG_FREQ_TRAN_VOTING_7__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 #define CG_FREQ_TRAN_VOTING_7__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf #define CG_FREQ_TRAN_VOTING_7__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 #define CG_FREQ_TRAN_VOTING_7__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 #define CG_FREQ_TRAN_VOTING_7__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 #define CG_FREQ_TRAN_VOTING_7__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 #define CG_FREQ_TRAN_VOTING_7__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 #define CG_FREQ_TRAN_VOTING_7__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 #define CG_FREQ_TRAN_VOTING_7__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 #define CG_FREQ_TRAN_VOTING_7__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 #define CG_FREQ_TRAN_VOTING_7__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 #define 
CG_FREQ_TRAN_VOTING_7__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 #define CG_FREQ_TRAN_VOTING_7__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 #define CG_FREQ_TRAN_VOTING_7__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 #define CG_FREQ_TRAN_VOTING_7__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 #define CG_FREQ_TRAN_VOTING_7__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 #define CG_FREQ_TRAN_VOTING_7__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 #define CG_FREQ_TRAN_VOTING_7__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 #define CG_FREQ_TRAN_VOTING_7__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 #define CG_FREQ_TRAN_VOTING_7__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 #define CG_FREQ_TRAN_VOTING_7__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 #define CG_FREQ_TRAN_VOTING_7__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 #define CG_FREQ_TRAN_VOTING_7__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 #define CG_FREQ_TRAN_VOTING_7__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a #define CG_FREQ_TRAN_VOTING_7__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 #define CG_FREQ_TRAN_VOTING_7__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b #define CG_FREQ_TRAN_VOTING_7__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 #define CG_FREQ_TRAN_VOTING_7__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c #define CG_FREQ_TRAN_VOTING_7__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 #define CG_FREQ_TRAN_VOTING_7__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d #define CG_FREQ_TRAN_VOTING_7__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 #define CG_FREQ_TRAN_VOTING_7__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e #define PLL_TEST_CNTL__TST_SRC_SEL_MASK 0xf #define PLL_TEST_CNTL__TST_SRC_SEL__SHIFT 0x0 #define PLL_TEST_CNTL__TST_REF_SEL_MASK 0xf0 #define PLL_TEST_CNTL__TST_REF_SEL__SHIFT 0x4 #define PLL_TEST_CNTL__REF_TEST_COUNT_MASK 0x7f00 #define PLL_TEST_CNTL__REF_TEST_COUNT__SHIFT 0x8 #define PLL_TEST_CNTL__TST_RESET_MASK 0x8000 #define PLL_TEST_CNTL__TST_RESET__SHIFT 0xf #define PLL_TEST_CNTL__TEST_COUNT_MASK 0xfffe0000 #define PLL_TEST_CNTL__TEST_COUNT__SHIFT 0x11 #define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_MASK 0xffff #define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT 0x0 #define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT_MASK 0xf0000 #define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT 0x10 #define CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK 0x3 #define CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT 0x0 #define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT_MASK 0x3fff0 #define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT__SHIFT 0x4 #define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT_MASK 0x700000 #define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT__SHIFT 0x14 #define CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK 0x3000000 #define CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT 0x18 #define CG_DISPLAY_GAP_CNTL__VBI_TIMER_DISABLE_MASK 0x10000000 #define CG_DISPLAY_GAP_CNTL__VBI_TIMER_DISABLE__SHIFT 0x1c #define CG_DISPLAY_GAP_CNTL2__VBI_PREDICTION_MASK 0xffffffff #define CG_DISPLAY_GAP_CNTL2__VBI_PREDICTION__SHIFT 0x0 #define CG_ACPI_CNTL__SCLK_ACPI_DIV_MASK 0x7f #define CG_ACPI_CNTL__SCLK_ACPI_DIV__SHIFT 0x0 #define CG_ACPI_CNTL__SCLK_CHANGE_SKIP_MASK 0x80 #define CG_ACPI_CNTL__SCLK_CHANGE_SKIP__SHIFT 0x7 #define SCLK_DEEP_SLEEP_CNTL__DIV_ID_MASK 0x7 #define SCLK_DEEP_SLEEP_CNTL__DIV_ID__SHIFT 0x0 #define SCLK_DEEP_SLEEP_CNTL__RAMP_DIS_MASK 0x8 #define SCLK_DEEP_SLEEP_CNTL__RAMP_DIS__SHIFT 0x3 #define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK 0xfff0 #define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT 0x4 #define 
SCLK_DEEP_SLEEP_CNTL__SCLK_RUNNING_MASK_MASK 0x10000 #define SCLK_DEEP_SLEEP_CNTL__SCLK_RUNNING_MASK__SHIFT 0x10 #define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_MASK_MASK 0x20000 #define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_MASK__SHIFT 0x11 #define SCLK_DEEP_SLEEP_CNTL__ALLOW_NBPSTATE_MASK_MASK 0x40000 #define SCLK_DEEP_SLEEP_CNTL__ALLOW_NBPSTATE_MASK__SHIFT 0x12 #define SCLK_DEEP_SLEEP_CNTL__BIF_BUSY_MASK_MASK 0x80000 #define SCLK_DEEP_SLEEP_CNTL__BIF_BUSY_MASK__SHIFT 0x13 #define SCLK_DEEP_SLEEP_CNTL__UVD_BUSY_MASK_MASK 0x100000 #define SCLK_DEEP_SLEEP_CNTL__UVD_BUSY_MASK__SHIFT 0x14 #define SCLK_DEEP_SLEEP_CNTL__MC0SRBM_BUSY_MASK_MASK 0x200000 #define SCLK_DEEP_SLEEP_CNTL__MC0SRBM_BUSY_MASK__SHIFT 0x15 #define SCLK_DEEP_SLEEP_CNTL__MC1SRBM_BUSY_MASK_MASK 0x400000 #define SCLK_DEEP_SLEEP_CNTL__MC1SRBM_BUSY_MASK__SHIFT 0x16 #define SCLK_DEEP_SLEEP_CNTL__MC_ALLOW_MASK_MASK 0x800000 #define SCLK_DEEP_SLEEP_CNTL__MC_ALLOW_MASK__SHIFT 0x17 #define SCLK_DEEP_SLEEP_CNTL__SMU_BUSY_MASK_MASK 0x1000000 #define SCLK_DEEP_SLEEP_CNTL__SMU_BUSY_MASK__SHIFT 0x18 #define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_NLC_MASK_MASK 0x2000000 #define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_NLC_MASK__SHIFT 0x19 #define SCLK_DEEP_SLEEP_CNTL__FAST_EXIT_REQ_NBPSTATE_MASK 0x4000000 #define SCLK_DEEP_SLEEP_CNTL__FAST_EXIT_REQ_NBPSTATE__SHIFT 0x1a #define SCLK_DEEP_SLEEP_CNTL__DEEP_SLEEP_ENTRY_MODE_MASK 0x8000000 #define SCLK_DEEP_SLEEP_CNTL__DEEP_SLEEP_ENTRY_MODE__SHIFT 0x1b #define SCLK_DEEP_SLEEP_CNTL__MBUS2_ACTIVE_MASK_MASK 0x10000000 #define SCLK_DEEP_SLEEP_CNTL__MBUS2_ACTIVE_MASK__SHIFT 0x1c #define SCLK_DEEP_SLEEP_CNTL__VCE_BUSY_MASK_MASK 0x20000000 #define SCLK_DEEP_SLEEP_CNTL__VCE_BUSY_MASK__SHIFT 0x1d #define SCLK_DEEP_SLEEP_CNTL__AZ_BUSY_MASK_MASK 0x40000000 #define SCLK_DEEP_SLEEP_CNTL__AZ_BUSY_MASK__SHIFT 0x1e #define SCLK_DEEP_SLEEP_CNTL__ENABLE_DS_MASK 0x80000000 #define SCLK_DEEP_SLEEP_CNTL__ENABLE_DS__SHIFT 0x1f #define SCLK_DEEP_SLEEP_CNTL2__RLC_BUSY_MASK_MASK 0x1 #define SCLK_DEEP_SLEEP_CNTL2__RLC_BUSY_MASK__SHIFT 0x0 #define SCLK_DEEP_SLEEP_CNTL2__HDP_BUSY_MASK_MASK 0x2 #define SCLK_DEEP_SLEEP_CNTL2__HDP_BUSY_MASK__SHIFT 0x1 #define SCLK_DEEP_SLEEP_CNTL2__ROM_BUSY_MASK_MASK 0x4 #define SCLK_DEEP_SLEEP_CNTL2__ROM_BUSY_MASK__SHIFT 0x2 #define SCLK_DEEP_SLEEP_CNTL2__IH_SEM_BUSY_MASK_MASK 0x8 #define SCLK_DEEP_SLEEP_CNTL2__IH_SEM_BUSY_MASK__SHIFT 0x3 #define SCLK_DEEP_SLEEP_CNTL2__PDMA_BUSY_MASK_MASK 0x10 #define SCLK_DEEP_SLEEP_CNTL2__PDMA_BUSY_MASK__SHIFT 0x4 #define SCLK_DEEP_SLEEP_CNTL2__IDCT_BUSY_MASK_MASK 0x40 #define SCLK_DEEP_SLEEP_CNTL2__IDCT_BUSY_MASK__SHIFT 0x6 #define SCLK_DEEP_SLEEP_CNTL2__SDMA_BUSY_MASK_MASK 0x80 #define SCLK_DEEP_SLEEP_CNTL2__SDMA_BUSY_MASK__SHIFT 0x7 #define SCLK_DEEP_SLEEP_CNTL2__DC_AZ_BUSY_MASK_MASK 0x100 #define SCLK_DEEP_SLEEP_CNTL2__DC_AZ_BUSY_MASK__SHIFT 0x8 #define SCLK_DEEP_SLEEP_CNTL2__ACP_SMU_ALLOW_DSLEEP_STUTTER_MASK_MASK 0x200 #define SCLK_DEEP_SLEEP_CNTL2__ACP_SMU_ALLOW_DSLEEP_STUTTER_MASK__SHIFT 0x9 #define SCLK_DEEP_SLEEP_CNTL2__UVD_CG_MC_STAT_BUSY_MASK_MASK 0x400 #define SCLK_DEEP_SLEEP_CNTL2__UVD_CG_MC_STAT_BUSY_MASK__SHIFT 0xa #define SCLK_DEEP_SLEEP_CNTL2__VCE_CG_MC_STAT_BUSY_MASK_MASK 0x800 #define SCLK_DEEP_SLEEP_CNTL2__VCE_CG_MC_STAT_BUSY_MASK__SHIFT 0xb #define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_MC_STAT_BUSY_MASK_MASK 0x1000 #define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_MC_STAT_BUSY_MASK__SHIFT 0xc #define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_STATUS_BUSY_MASK_MASK 0x2000 #define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_STATUS_BUSY_MASK__SHIFT 0xd #define 
SCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK_MASK 0x4000 #define SCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK__SHIFT 0xe #define SCLK_DEEP_SLEEP_CNTL2__SHALLOW_DIV_ID_MASK 0xe00000 #define SCLK_DEEP_SLEEP_CNTL2__SHALLOW_DIV_ID__SHIFT 0x15 #define SCLK_DEEP_SLEEP_CNTL2__INOUT_CUSHION_MASK 0xff000000 #define SCLK_DEEP_SLEEP_CNTL2__INOUT_CUSHION__SHIFT 0x18 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_0_SMU_BUSY_MASK_MASK 0x1 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_0_SMU_BUSY_MASK__SHIFT 0x0 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_1_SMU_BUSY_MASK_MASK 0x2 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_1_SMU_BUSY_MASK__SHIFT 0x1 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_2_SMU_BUSY_MASK_MASK 0x4 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_2_SMU_BUSY_MASK__SHIFT 0x2 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_3_SMU_BUSY_MASK_MASK 0x8 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_3_SMU_BUSY_MASK__SHIFT 0x3 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_4_SMU_BUSY_MASK_MASK 0x10 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_4_SMU_BUSY_MASK__SHIFT 0x4 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_5_SMU_BUSY_MASK_MASK 0x20 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_5_SMU_BUSY_MASK__SHIFT 0x5 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_6_SMU_BUSY_MASK_MASK 0x40 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_6_SMU_BUSY_MASK__SHIFT 0x6 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_7_SMU_BUSY_MASK_MASK 0x80 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_7_SMU_BUSY_MASK__SHIFT 0x7 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_8_SMU_BUSY_MASK_MASK 0x100 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_8_SMU_BUSY_MASK__SHIFT 0x8 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_9_SMU_BUSY_MASK_MASK 0x200 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_9_SMU_BUSY_MASK__SHIFT 0x9 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_10_SMU_BUSY_MASK_MASK 0x400 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_10_SMU_BUSY_MASK__SHIFT 0xa #define SCLK_DEEP_SLEEP_CNTL3__GRBM_11_SMU_BUSY_MASK_MASK 0x800 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_11_SMU_BUSY_MASK__SHIFT 0xb #define SCLK_DEEP_SLEEP_CNTL3__GRBM_12_SMU_BUSY_MASK_MASK 0x1000 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_12_SMU_BUSY_MASK__SHIFT 0xc #define SCLK_DEEP_SLEEP_CNTL3__GRBM_13_SMU_BUSY_MASK_MASK 0x2000 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_13_SMU_BUSY_MASK__SHIFT 0xd #define SCLK_DEEP_SLEEP_CNTL3__GRBM_14_SMU_BUSY_MASK_MASK 0x4000 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_14_SMU_BUSY_MASK__SHIFT 0xe #define SCLK_DEEP_SLEEP_CNTL3__GRBM_15_SMU_BUSY_MASK_MASK 0x8000 #define SCLK_DEEP_SLEEP_CNTL3__GRBM_15_SMU_BUSY_MASK__SHIFT 0xf #define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_DS_DIV_ID_MASK 0x7 #define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_DS_DIV_ID__SHIFT 0x0 #define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_SS_DIV_ID_MASK 0x38 #define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_SS_DIV_ID__SHIFT 0x3 #define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_ENABLE_MASK 0x10000 #define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_ENABLE__SHIFT 0x10 #define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_DS_DIV_ID_MASK 0xe0000 #define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_DS_DIV_ID__SHIFT 0x11 #define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_SS_DIV_ID_MASK 0x700000 #define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_SS_DIV_ID__SHIFT 0x14 #define LCLK_DEEP_SLEEP_CNTL__DIV_ID_MASK 0x7 #define LCLK_DEEP_SLEEP_CNTL__DIV_ID__SHIFT 0x0 #define LCLK_DEEP_SLEEP_CNTL__RAMP_DIS_MASK 0x8 #define LCLK_DEEP_SLEEP_CNTL__RAMP_DIS__SHIFT 0x3 #define LCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK 0xfff0 #define LCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT 0x4 #define LCLK_DEEP_SLEEP_CNTL__RESERVED_MASK 0x7fff0000 #define LCLK_DEEP_SLEEP_CNTL__RESERVED__SHIFT 0x10 #define LCLK_DEEP_SLEEP_CNTL__ENABLE_DS_MASK 0x80000000 #define LCLK_DEEP_SLEEP_CNTL__ENABLE_DS__SHIFT 0x1f #define LCLK_DEEP_SLEEP_CNTL2__RFE_BUSY_MASK_MASK 0x1 #define 
LCLK_DEEP_SLEEP_CNTL2__RFE_BUSY_MASK__SHIFT 0x0 #define LCLK_DEEP_SLEEP_CNTL2__BIF_CG_LCLK_BUSY_MASK_MASK 0x2 #define LCLK_DEEP_SLEEP_CNTL2__BIF_CG_LCLK_BUSY_MASK__SHIFT 0x1 #define LCLK_DEEP_SLEEP_CNTL2__L1IMU_SMU_IDLE_MASK_MASK 0x4 #define LCLK_DEEP_SLEEP_CNTL2__L1IMU_SMU_IDLE_MASK__SHIFT 0x2 #define LCLK_DEEP_SLEEP_CNTL2__RESERVED_BIT3_MASK 0x8 #define LCLK_DEEP_SLEEP_CNTL2__RESERVED_BIT3__SHIFT 0x3 #define LCLK_DEEP_SLEEP_CNTL2__SCLK_RUNNING_MASK_MASK 0x10 #define LCLK_DEEP_SLEEP_CNTL2__SCLK_RUNNING_MASK__SHIFT 0x4 #define LCLK_DEEP_SLEEP_CNTL2__SMU_BUSY_MASK_MASK 0x20 #define LCLK_DEEP_SLEEP_CNTL2__SMU_BUSY_MASK__SHIFT 0x5 #define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE1_MASK_MASK 0x40 #define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE1_MASK__SHIFT 0x6 #define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE2_MASK_MASK 0x80 #define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE2_MASK__SHIFT 0x7 #define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE3_MASK_MASK 0x100 #define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE3_MASK__SHIFT 0x8 #define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE4_MASK_MASK 0x200 #define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE4_MASK__SHIFT 0x9 #define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPP_IDLE_MASK_MASK 0x400 #define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPP_IDLE_MASK__SHIFT 0xa #define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPPSB_IDLE_MASK_MASK 0x800 #define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPPSB_IDLE_MASK__SHIFT 0xb #define LCLK_DEEP_SLEEP_CNTL2__L1IMUBIF_IDLE_MASK_MASK 0x1000 #define LCLK_DEEP_SLEEP_CNTL2__L1IMUBIF_IDLE_MASK__SHIFT 0xc #define LCLK_DEEP_SLEEP_CNTL2__L1IMUINTGEN_IDLE_MASK_MASK 0x2000 #define LCLK_DEEP_SLEEP_CNTL2__L1IMUINTGEN_IDLE_MASK__SHIFT 0xd #define LCLK_DEEP_SLEEP_CNTL2__L2IMU_IDLE_MASK_MASK 0x4000 #define LCLK_DEEP_SLEEP_CNTL2__L2IMU_IDLE_MASK__SHIFT 0xe #define LCLK_DEEP_SLEEP_CNTL2__ORB_IDLE_MASK_MASK 0x8000 #define LCLK_DEEP_SLEEP_CNTL2__ORB_IDLE_MASK__SHIFT 0xf #define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_MASK_MASK 0x10000 #define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_MASK__SHIFT 0x10 #define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_ACK_MASK_MASK 0x20000 #define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_ACK_MASK__SHIFT 0x11 #define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_MASK_MASK 0x40000 #define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_MASK__SHIFT 0x12 #define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_ACK_MASK_MASK 0x80000 #define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_ACK_MASK__SHIFT 0x13 #define LCLK_DEEP_SLEEP_CNTL2__DMAACTIVE_MASK_MASK 0x100000 #define LCLK_DEEP_SLEEP_CNTL2__DMAACTIVE_MASK__SHIFT 0x14 #define LCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK_MASK 0x200000 #define LCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK__SHIFT 0x15 #define LCLK_DEEP_SLEEP_CNTL2__RESERVED_MASK 0xffc00000 #define LCLK_DEEP_SLEEP_CNTL2__RESERVED__SHIFT 0x16 #define SMU_VOLTAGE_STATUS__SMU_VOLTAGE_STATUS_MASK 0x1 #define SMU_VOLTAGE_STATUS__SMU_VOLTAGE_STATUS__SHIFT 0x0 #define SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK 0x1fe #define SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT 0x1 #define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDCI_INDEX_MASK 0xf #define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDCI_INDEX__SHIFT 0x0 #define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDCI_INDEX_MASK 0xf0 #define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDCI_INDEX__SHIFT 0x4 #define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_MVDD_INDEX_MASK 0xf00 #define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_MVDD_INDEX__SHIFT 0x8 #define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_MVDD_INDEX_MASK 0xf000 #define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_MVDD_INDEX__SHIFT 0xc 
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDC_INDEX_MASK 0xf0000 #define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDC_INDEX__SHIFT 0x10 #define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDC_INDEX_MASK 0xf00000 #define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDC_INDEX__SHIFT 0x14 #define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK 0xf000000 #define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT 0x18 #define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX_MASK 0xf0000000 #define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX__SHIFT 0x1c #define CG_ULV_PARAMETER__ULV_THRESHOLD_MASK 0xffff #define CG_ULV_PARAMETER__ULV_THRESHOLD__SHIFT 0x0 #define CG_ULV_PARAMETER__ULV_THRESHOLD_UNIT_MASK 0xf0000 #define CG_ULV_PARAMETER__ULV_THRESHOLD_UNIT__SHIFT 0x10 #define SCLK_MIN_DIV__FRACV_MASK 0xfff #define SCLK_MIN_DIV__FRACV__SHIFT 0x0 #define SCLK_MIN_DIV__INTV_MASK 0x7f000 #define SCLK_MIN_DIV__INTV__SHIFT 0xc #define LCAC_SX0_CNTL__SX0_ENABLE_MASK 0x1 #define LCAC_SX0_CNTL__SX0_ENABLE__SHIFT 0x0 #define LCAC_SX0_CNTL__SX0_THRESHOLD_MASK 0x1fffe #define LCAC_SX0_CNTL__SX0_THRESHOLD__SHIFT 0x1 #define LCAC_SX0_CNTL__SX0_BLOCK_ID_MASK 0x3e0000 #define LCAC_SX0_CNTL__SX0_BLOCK_ID__SHIFT 0x11 #define LCAC_SX0_CNTL__SX0_SIGNAL_ID_MASK 0x3fc00000 #define LCAC_SX0_CNTL__SX0_SIGNAL_ID__SHIFT 0x16 #define LCAC_SX0_OVR_SEL__SX0_OVR_SEL_MASK 0xffffffff #define LCAC_SX0_OVR_SEL__SX0_OVR_SEL__SHIFT 0x0 #define LCAC_SX0_OVR_VAL__SX0_OVR_VAL_MASK 0xffffffff #define LCAC_SX0_OVR_VAL__SX0_OVR_VAL__SHIFT 0x0 #define LCAC_MC0_CNTL__MC0_ENABLE_MASK 0x1 #define LCAC_MC0_CNTL__MC0_ENABLE__SHIFT 0x0 #define LCAC_MC0_CNTL__MC0_THRESHOLD_MASK 0x1fffe #define LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT 0x1 #define LCAC_MC0_CNTL__MC0_BLOCK_ID_MASK 0x3e0000 #define LCAC_MC0_CNTL__MC0_BLOCK_ID__SHIFT 0x11 #define LCAC_MC0_CNTL__MC0_SIGNAL_ID_MASK 0x3fc00000 #define LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT 0x16 #define LCAC_MC0_OVR_SEL__MC0_OVR_SEL_MASK 0xffffffff #define LCAC_MC0_OVR_SEL__MC0_OVR_SEL__SHIFT 0x0 #define LCAC_MC0_OVR_VAL__MC0_OVR_VAL_MASK 0xffffffff #define LCAC_MC0_OVR_VAL__MC0_OVR_VAL__SHIFT 0x0 #define LCAC_MC1_CNTL__MC1_ENABLE_MASK 0x1 #define LCAC_MC1_CNTL__MC1_ENABLE__SHIFT 0x0 #define LCAC_MC1_CNTL__MC1_THRESHOLD_MASK 0x1fffe #define LCAC_MC1_CNTL__MC1_THRESHOLD__SHIFT 0x1 #define LCAC_MC1_CNTL__MC1_BLOCK_ID_MASK 0x3e0000 #define LCAC_MC1_CNTL__MC1_BLOCK_ID__SHIFT 0x11 #define LCAC_MC1_CNTL__MC1_SIGNAL_ID_MASK 0x3fc00000 #define LCAC_MC1_CNTL__MC1_SIGNAL_ID__SHIFT 0x16 #define LCAC_MC1_OVR_SEL__MC1_OVR_SEL_MASK 0xffffffff #define LCAC_MC1_OVR_SEL__MC1_OVR_SEL__SHIFT 0x0 #define LCAC_MC1_OVR_VAL__MC1_OVR_VAL_MASK 0xffffffff #define LCAC_MC1_OVR_VAL__MC1_OVR_VAL__SHIFT 0x0 #define LCAC_MC2_CNTL__MC2_ENABLE_MASK 0x1 #define LCAC_MC2_CNTL__MC2_ENABLE__SHIFT 0x0 #define LCAC_MC2_CNTL__MC2_THRESHOLD_MASK 0x1fffe #define LCAC_MC2_CNTL__MC2_THRESHOLD__SHIFT 0x1 #define LCAC_MC2_CNTL__MC2_BLOCK_ID_MASK 0x3e0000 #define LCAC_MC2_CNTL__MC2_BLOCK_ID__SHIFT 0x11 #define LCAC_MC2_CNTL__MC2_SIGNAL_ID_MASK 0x3fc00000 #define LCAC_MC2_CNTL__MC2_SIGNAL_ID__SHIFT 0x16 #define LCAC_MC2_OVR_SEL__MC2_OVR_SEL_MASK 0xffffffff #define LCAC_MC2_OVR_SEL__MC2_OVR_SEL__SHIFT 0x0 #define LCAC_MC2_OVR_VAL__MC2_OVR_VAL_MASK 0xffffffff #define LCAC_MC2_OVR_VAL__MC2_OVR_VAL__SHIFT 0x0 #define LCAC_MC3_CNTL__MC3_ENABLE_MASK 0x1 #define LCAC_MC3_CNTL__MC3_ENABLE__SHIFT 0x0 #define LCAC_MC3_CNTL__MC3_THRESHOLD_MASK 0x1fffe #define LCAC_MC3_CNTL__MC3_THRESHOLD__SHIFT 0x1 #define LCAC_MC3_CNTL__MC3_BLOCK_ID_MASK 
0x3e0000 #define LCAC_MC3_CNTL__MC3_BLOCK_ID__SHIFT 0x11 #define LCAC_MC3_CNTL__MC3_SIGNAL_ID_MASK 0x3fc00000 #define LCAC_MC3_CNTL__MC3_SIGNAL_ID__SHIFT 0x16 #define LCAC_MC3_OVR_SEL__MC3_OVR_SEL_MASK 0xffffffff #define LCAC_MC3_OVR_SEL__MC3_OVR_SEL__SHIFT 0x0 #define LCAC_MC3_OVR_VAL__MC3_OVR_VAL_MASK 0xffffffff #define LCAC_MC3_OVR_VAL__MC3_OVR_VAL__SHIFT 0x0 #define LCAC_CPL_CNTL__CPL_ENABLE_MASK 0x1 #define LCAC_CPL_CNTL__CPL_ENABLE__SHIFT 0x0 #define LCAC_CPL_CNTL__CPL_THRESHOLD_MASK 0x1fffe #define LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT 0x1 #define LCAC_CPL_CNTL__CPL_BLOCK_ID_MASK 0x3e0000 #define LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT 0x11 #define LCAC_CPL_CNTL__CPL_SIGNAL_ID_MASK 0x3fc00000 #define LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT 0x16 #define LCAC_CPL_OVR_SEL__CPL_OVR_SEL_MASK 0xffffffff #define LCAC_CPL_OVR_SEL__CPL_OVR_SEL__SHIFT 0x0 #define LCAC_CPL_OVR_VAL__CPL_OVR_VAL_MASK 0xffffffff #define LCAC_CPL_OVR_VAL__CPL_OVR_VAL__SHIFT 0x0 #endif /* SMU_7_0_0_SH_MASK_H */
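The block ending above is the tail of an AMD SMU register-field header in which every field is described by a *_MASK / *__SHIFT macro pair. As a purely illustrative sketch (not part of the dataset row), the snippet below shows how such a pair is normally used to read and update one field; only the two macro values are taken from the header above, while the register value and the helper names field_get/field_set are invented for the example.

#include <cstdint>
#include <cstdio>

#define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK   0xfff0
#define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT 0x4

static uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift) {
  return (reg & mask) >> shift;                    // isolate the field and right-align it
}

static uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val) {
  return (reg & ~mask) | ((val << shift) & mask);  // clear the field, then write the new value
}

int main() {
  uint32_t reg = 0x12345678;                       // pretend this came from an MMIO read
  uint32_t hyst = field_get(reg, SCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK,
                            SCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT);
  reg = field_set(reg, SCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK,
                  SCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT, hyst + 1);
  std::printf("hysteresis=%u reg=0x%08x\n", hyst, reg);
  return 0;
}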
null
null
null
null
99,265
26,344
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
191,339
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * HDMI PLL * * Copyright (C) 2013 Texas Instruments Incorporated * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #define DSS_SUBSYS_NAME "HDMIPLL" #include <linux/kernel.h> #include <linux/module.h> #include <linux/err.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/seq_file.h> #include <linux/pm_runtime.h> #include "omapdss.h" #include "dss.h" #include "hdmi.h" void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s) { #define DUMPPLL(r) seq_printf(s, "%-35s %08x\n", #r,\ hdmi_read_reg(pll->base, r)) DUMPPLL(PLLCTRL_PLL_CONTROL); DUMPPLL(PLLCTRL_PLL_STATUS); DUMPPLL(PLLCTRL_PLL_GO); DUMPPLL(PLLCTRL_CFG1); DUMPPLL(PLLCTRL_CFG2); DUMPPLL(PLLCTRL_CFG3); DUMPPLL(PLLCTRL_SSC_CFG1); DUMPPLL(PLLCTRL_SSC_CFG2); DUMPPLL(PLLCTRL_CFG4); } static int hdmi_pll_enable(struct dss_pll *dsspll) { struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll); struct hdmi_wp_data *wp = pll->wp; int r; r = pm_runtime_get_sync(&pll->pdev->dev); WARN_ON(r < 0); dss_ctrl_pll_enable(DSS_PLL_HDMI, true); r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS); if (r) return r; return 0; } static void hdmi_pll_disable(struct dss_pll *dsspll) { struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll); struct hdmi_wp_data *wp = pll->wp; int r; hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF); dss_ctrl_pll_enable(DSS_PLL_HDMI, false); r = pm_runtime_put_sync(&pll->pdev->dev); WARN_ON(r < 0 && r != -ENOSYS); } static const struct dss_pll_ops dsi_pll_ops = { .enable = hdmi_pll_enable, .disable = hdmi_pll_disable, .set_config = dss_pll_write_config_type_b, }; static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = { .type = DSS_PLL_TYPE_B, .n_max = 255, .m_min = 20, .m_max = 4095, .mX_max = 127, .fint_min = 500000, .fint_max = 2500000, .clkdco_min = 500000000, .clkdco_low = 1000000000, .clkdco_max = 2000000000, .n_msb = 8, .n_lsb = 1, .m_msb = 20, .m_lsb = 9, .mX_msb[0] = 24, .mX_lsb[0] = 18, .has_selfreqdco = true, }; static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = { .type = DSS_PLL_TYPE_B, .n_max = 255, .m_min = 20, .m_max = 2045, .mX_max = 127, .fint_min = 620000, .fint_max = 2500000, .clkdco_min = 750000000, .clkdco_low = 1500000000, .clkdco_max = 2500000000UL, .n_msb = 8, .n_lsb = 1, .m_msb = 20, .m_lsb = 9, .mX_msb[0] = 24, .mX_lsb[0] = 18, .has_selfreqdco = true, .has_refsel = true, }; static int dsi_init_pll_data(struct platform_device *pdev, struct hdmi_pll_data *hpll) { struct dss_pll *pll = &hpll->pll; struct clk *clk; int r; clk = devm_clk_get(&pdev->dev, "sys_clk"); if (IS_ERR(clk)) { DSSERR("can't get sys_clk\n"); return PTR_ERR(clk); } pll->name = "hdmi"; pll->id = DSS_PLL_HDMI; pll->base = hpll->base; pll->clkin = clk; switch (omapdss_get_version()) { case OMAPDSS_VER_OMAP4430_ES1: case OMAPDSS_VER_OMAP4430_ES2: case OMAPDSS_VER_OMAP4: pll->hw = &dss_omap4_hdmi_pll_hw; break; case OMAPDSS_VER_OMAP5: case OMAPDSS_VER_DRA7xx: pll->hw = &dss_omap5_hdmi_pll_hw; break; default: return -ENODEV; } pll->ops = &dsi_pll_ops; r = dss_pll_register(pll); if (r) return r; return 0; } int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll, struct hdmi_wp_data *wp) { int r; struct resource *res; pll->pdev = pdev; pll->wp = wp; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll"); if (!res) { DSSERR("can't get PLL mem resource\n"); return -EINVAL; } 
pll->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pll->base)) { DSSERR("can't ioremap PLLCTRL\n"); return PTR_ERR(pll->base); } r = dsi_init_pll_data(pdev, pll); if (r) { DSSERR("failed to init HDMI PLL\n"); return r; } return 0; } void hdmi_pll_uninit(struct hdmi_pll_data *hpll) { struct dss_pll *pll = &hpll->pll; dss_pll_unregister(pll); }
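hdmi_pll_enable() and hdmi_pll_disable() in the file above recover the enclosing hdmi_pll_data from the embedded dss_pll member via container_of(). Below is a minimal, self-contained sketch of that recovery pattern (illustrative only, not kernel code; the trimmed-down struct fields and the helper name to_hdmi_pll_data are invented for the example).

#include <cstddef>
#include <cstdio>

struct dss_pll { int id; };                        // stand-in for the embedded member
struct hdmi_pll_data { int extra; dss_pll pll; };  // stand-in for the containing object

// container_of-style recovery: from a pointer to the embedded member,
// compute the address of the enclosing object (standard-layout types only).
static hdmi_pll_data* to_hdmi_pll_data(dss_pll* p) {
  return reinterpret_cast<hdmi_pll_data*>(
      reinterpret_cast<char*>(p) - offsetof(hdmi_pll_data, pll));
}

int main() {
  hdmi_pll_data data{42, {7}};
  dss_pll* member = &data.pll;                     // what a PLL callback would receive
  std::printf("extra=%d\n", to_hdmi_pll_data(member)->extra);  // prints 42
  return 0;
}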
null
null
null
null
99,686
14,056
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
14,056
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/offline_pages/core/prefetch/test_prefetch_dispatcher.h" #include "components/offline_pages/core/offline_page_item.h" #include "components/offline_pages/core/prefetch/prefetch_background_task.h" namespace offline_pages { TestPrefetchDispatcher::TestPrefetchDispatcher() = default; TestPrefetchDispatcher::~TestPrefetchDispatcher() = default; void TestPrefetchDispatcher::AddCandidatePrefetchURLs( const std::string& name_space, const std::vector<PrefetchURL>& prefetch_urls) { latest_name_space = name_space; latest_prefetch_urls = prefetch_urls; new_suggestions_count++; } void TestPrefetchDispatcher::RemoveAllUnprocessedPrefetchURLs( const std::string& name_space) { latest_prefetch_urls.clear(); remove_all_suggestions_count++; } void TestPrefetchDispatcher::RemovePrefetchURLsByClientId( const ClientId& client_id) { remove_by_client_id_count++; last_removed_client_id = std::make_unique<ClientId>(client_id); } void TestPrefetchDispatcher::BeginBackgroundTask( std::unique_ptr<PrefetchBackgroundTask> task) {} void TestPrefetchDispatcher::StopBackgroundTask() {} void TestPrefetchDispatcher::SetService(PrefetchService* service) {} void TestPrefetchDispatcher::SchedulePipelineProcessing() { processing_schedule_count++; } void TestPrefetchDispatcher::EnsureTaskScheduled() { task_schedule_count++; } void TestPrefetchDispatcher::GCMOperationCompletedMessageReceived( const std::string& operation_name) { operation_list.push_back(operation_name); } void TestPrefetchDispatcher::CleanupDownloads( const std::set<std::string>& outstanding_download_ids, const std::map<std::string, std::pair<base::FilePath, int64_t>>& success_downloads) { cleanup_downloads_count++; } void TestPrefetchDispatcher::DownloadCompleted( const PrefetchDownloadResult& download_result) { download_results.push_back(download_result); } void TestPrefetchDispatcher::ItemDownloaded(int64_t offline_id, const ClientId& client_id) { item_downloaded_results.push_back(std::make_pair(offline_id, client_id)); } void TestPrefetchDispatcher::ArchiveImported(int64_t offline_id, bool success) { import_results.push_back(std::make_pair(offline_id, success)); } } // namespace offline_pages
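TestPrefetchDispatcher above is a recording test double: each override just stores its arguments and bumps a counter so a test can assert on the interaction rather than on side effects. The sketch below shows the same pattern in a self-contained form (illustrative only; Dispatcher, RecordingDispatcher, and AddCandidates are invented names, not the Chromium API).

#include <cassert>
#include <string>
#include <vector>

struct Dispatcher {                        // hypothetical interface under test
  virtual ~Dispatcher() = default;
  virtual void AddCandidates(const std::vector<std::string>& urls) = 0;
};

struct RecordingDispatcher : Dispatcher {  // hypothetical test double
  std::vector<std::string> last_urls;
  int add_count = 0;

  void AddCandidates(const std::vector<std::string>& urls) override {
    last_urls = urls;   // remember the arguments for later assertions
    ++add_count;        // count the call, like new_suggestions_count above
  }
};

int main() {
  RecordingDispatcher dispatcher;
  dispatcher.AddCandidates({"https://example.com/a", "https://example.com/b"});
  assert(dispatcher.add_count == 1);
  assert(dispatcher.last_urls.size() == 2);
  return 0;
}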
null
null
null
null
10,919
65,830
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
65,830
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/browsing_data/browsing_data_indexed_db_helper.h" #include <tuple> #include <vector> #include "base/bind.h" #include "base/location.h" #include "base/time/time.h" #include "chrome/browser/browsing_data/browsing_data_helper.h" #include "content/public/browser/browser_thread.h" #include "content/public/browser/indexed_db_context.h" using content::BrowserThread; using content::IndexedDBContext; using content::IndexedDBInfo; BrowsingDataIndexedDBHelper::BrowsingDataIndexedDBHelper( IndexedDBContext* indexed_db_context) : indexed_db_context_(indexed_db_context) { DCHECK(indexed_db_context_.get()); } BrowsingDataIndexedDBHelper::~BrowsingDataIndexedDBHelper() { } void BrowsingDataIndexedDBHelper::StartFetching(const FetchCallback& callback) { DCHECK_CURRENTLY_ON(BrowserThread::UI); DCHECK(!callback.is_null()); indexed_db_context_->TaskRunner()->PostTask( FROM_HERE, base::BindOnce( &BrowsingDataIndexedDBHelper::FetchIndexedDBInfoInIndexedDBThread, this, callback)); } void BrowsingDataIndexedDBHelper::DeleteIndexedDB(const GURL& origin) { DCHECK_CURRENTLY_ON(BrowserThread::UI); indexed_db_context_->TaskRunner()->PostTask( FROM_HERE, base::BindOnce( &BrowsingDataIndexedDBHelper::DeleteIndexedDBInIndexedDBThread, this, origin)); } void BrowsingDataIndexedDBHelper::FetchIndexedDBInfoInIndexedDBThread( const FetchCallback& callback) { DCHECK(indexed_db_context_->TaskRunner()->RunsTasksInCurrentSequence()); DCHECK(!callback.is_null()); std::vector<IndexedDBInfo> origins = indexed_db_context_->GetAllOriginsInfo(); std::list<content::IndexedDBInfo> result; for (const IndexedDBInfo& origin : origins) { if (!BrowsingDataHelper::HasWebScheme(origin.origin)) continue; // Non-websafe state is not considered browsing data. result.push_back(origin); } BrowserThread::PostTask(BrowserThread::UI, FROM_HERE, base::BindOnce(callback, result)); } void BrowsingDataIndexedDBHelper::DeleteIndexedDBInIndexedDBThread( const GURL& origin) { DCHECK(indexed_db_context_->TaskRunner()->RunsTasksInCurrentSequence()); indexed_db_context_->DeleteForOrigin(origin); } CannedBrowsingDataIndexedDBHelper:: PendingIndexedDBInfo::PendingIndexedDBInfo(const GURL& origin, const base::string16& name) : origin(origin), name(name) { } CannedBrowsingDataIndexedDBHelper:: PendingIndexedDBInfo::~PendingIndexedDBInfo() { } bool CannedBrowsingDataIndexedDBHelper::PendingIndexedDBInfo::operator<( const PendingIndexedDBInfo& other) const { return std::tie(origin, name) < std::tie(other.origin, other.name); } CannedBrowsingDataIndexedDBHelper::CannedBrowsingDataIndexedDBHelper( content::IndexedDBContext* context) : BrowsingDataIndexedDBHelper(context) { } CannedBrowsingDataIndexedDBHelper::~CannedBrowsingDataIndexedDBHelper() {} void CannedBrowsingDataIndexedDBHelper::AddIndexedDB( const GURL& origin, const base::string16& name) { if (!BrowsingDataHelper::HasWebScheme(origin)) return; // Non-websafe state is not considered browsing data. 
pending_indexed_db_info_.insert(PendingIndexedDBInfo(origin, name)); } void CannedBrowsingDataIndexedDBHelper::Reset() { pending_indexed_db_info_.clear(); } bool CannedBrowsingDataIndexedDBHelper::empty() const { return pending_indexed_db_info_.empty(); } size_t CannedBrowsingDataIndexedDBHelper::GetIndexedDBCount() const { return pending_indexed_db_info_.size(); } const std::set<CannedBrowsingDataIndexedDBHelper::PendingIndexedDBInfo>& CannedBrowsingDataIndexedDBHelper::GetIndexedDBInfo() const { return pending_indexed_db_info_; } void CannedBrowsingDataIndexedDBHelper::StartFetching( const FetchCallback& callback) { DCHECK_CURRENTLY_ON(BrowserThread::UI); DCHECK(!callback.is_null()); std::list<IndexedDBInfo> result; for (const PendingIndexedDBInfo& pending_info : pending_indexed_db_info_) { IndexedDBInfo info(pending_info.origin, 0, base::Time(), 0); result.push_back(info); } BrowserThread::PostTask(BrowserThread::UI, FROM_HERE, base::BindOnce(callback, result)); } void CannedBrowsingDataIndexedDBHelper::DeleteIndexedDB( const GURL& origin) { for (std::set<PendingIndexedDBInfo>::iterator it = pending_indexed_db_info_.begin(); it != pending_indexed_db_info_.end(); ) { if (it->origin == origin) pending_indexed_db_info_.erase(it++); else ++it; } BrowsingDataIndexedDBHelper::DeleteIndexedDB(origin); }
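CannedBrowsingDataIndexedDBHelper::DeleteIndexedDB() above removes matching entries from a std::set while iterating, using erase(it++) so the iterator stays valid across the removal. The same idiom on a plain set of origin strings, as a self-contained sketch (illustrative only, not Chromium code; the sample origins are made up):

#include <iostream>
#include <set>
#include <string>

int main() {
  std::set<std::string> origins = {"https://a.test", "https://a.test/sub",
                                   "https://b.test"};
  const std::string prefix = "https://a.test";

  for (auto it = origins.begin(); it != origins.end();) {
    if (it->compare(0, prefix.size(), prefix) == 0)
      origins.erase(it++);   // erase the current element, then advance
    else
      ++it;
  }

  for (const auto& o : origins)
    std::cout << o << '\n';  // only "https://b.test" remains
  return 0;
}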
null
null
null
null
62,693
39,622
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
204,617
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#include "sched.h" /* * idle-task scheduling class. * * (NOTE: these are not related to SCHED_IDLE tasks which are * handled in sched/fair.c) */ #ifdef CONFIG_SMP static int select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags) { return task_cpu(p); /* IDLE tasks as never migrated */ } #endif /* CONFIG_SMP */ /* * Idle tasks are unconditionally rescheduled: */ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags) { resched_curr(rq); } static struct task_struct * pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { put_prev_task(rq, prev); update_idle_core(rq); schedstat_inc(rq->sched_goidle); return rq->idle; } /* * It is not legal to sleep in the idle task - print a warning * message if some code attempts to do it: */ static void dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags) { raw_spin_unlock_irq(&rq->lock); printk(KERN_ERR "bad: scheduling from the idle thread!\n"); dump_stack(); raw_spin_lock_irq(&rq->lock); } static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) { rq_last_tick_reset(rq); } static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued) { } static void set_curr_task_idle(struct rq *rq) { } static void switched_to_idle(struct rq *rq, struct task_struct *p) { BUG(); } static void prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio) { BUG(); } static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task) { return 0; } static void update_curr_idle(struct rq *rq) { } /* * Simple, special scheduling class for the per-CPU idle tasks: */ const struct sched_class idle_sched_class = { /* .next is NULL */ /* no enqueue/yield_task for idle tasks */ /* dequeue is not valid, we print a debug message there: */ .dequeue_task = dequeue_task_idle, .check_preempt_curr = check_preempt_curr_idle, .pick_next_task = pick_next_task_idle, .put_prev_task = put_prev_task_idle, #ifdef CONFIG_SMP .select_task_rq = select_task_rq_idle, .set_cpus_allowed = set_cpus_allowed_common, #endif .set_curr_task = set_curr_task_idle, .task_tick = task_tick_idle, .get_rr_interval = get_rr_interval_idle, .prio_changed = prio_changed_idle, .switched_to = switched_to_idle, .update_curr = update_curr_idle, };
null
null
null
null
112,964
3,412
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
156,469
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/* * Metar Gear Solid: The Twin Snakes demuxer * Copyright (c) 2012 Paul B Mahol * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/intreadwrite.h" #include "libavutil/intfloat.h" #include "avformat.h" #include "riff.h" static int read_probe(AVProbeData *p) { if (AV_RB32(p->buf ) != 0x000E || AV_RB32(p->buf + 4) != 0x0050 || AV_RB32(p->buf + 12) != 0x0034) return 0; return AVPROBE_SCORE_MAX; } static int read_header(AVFormatContext *s) { AVIOContext *pb = s->pb; AVStream *st; AVRational fps; uint32_t chunk_size; avio_skip(pb, 4); chunk_size = avio_rb32(pb); if (chunk_size != 80) return AVERROR(EIO); avio_skip(pb, 20); st = avformat_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); st->need_parsing = AVSTREAM_PARSE_HEADERS; st->start_time = 0; st->nb_frames = st->duration = avio_rb32(pb); fps = av_d2q(av_int2float(avio_rb32(pb)), INT_MAX); st->codecpar->width = avio_rb32(pb); st->codecpar->height = avio_rb32(pb); avio_skip(pb, 12); st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_tag = avio_rb32(pb); st->codecpar->codec_id = ff_codec_get_id(ff_codec_bmp_tags, st->codecpar->codec_tag); avpriv_set_pts_info(st, 64, fps.den, fps.num); avio_skip(pb, 20); return 0; } static int read_packet(AVFormatContext *s, AVPacket *pkt) { AVIOContext *pb = s->pb; uint32_t chunk_size, payload_size; int ret; if (avio_feof(pb)) return AVERROR_EOF; avio_skip(pb, 4); chunk_size = avio_rb32(pb); avio_skip(pb, 4); payload_size = avio_rb32(pb); if (chunk_size < payload_size + 16) return AVERROR(EIO); ret = av_get_packet(pb, pkt, payload_size); if (ret < 0) return ret; pkt->pos -= 16; pkt->duration = 1; avio_skip(pb, chunk_size - (ret + 16)); return ret; } AVInputFormat ff_mgsts_demuxer = { .name = "mgsts", .long_name = NULL_IF_CONFIG_SMALL("Metal Gear Solid: The Twin Snakes"), .read_probe = read_probe, .read_header = read_header, .read_packet = read_packet, .flags = AVFMT_GENERIC_INDEX, };
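read_packet() in the demuxer above only trusts a chunk after checking that the declared chunk_size covers the 16-byte header plus the payload it announces. The sketch below shows that validation pattern on a toy length-prefixed record (illustrative only, not the FFmpeg API; parse_chunk and kHeaderSize are invented names).

#include <cstdint>
#include <cstdio>
#include <vector>

constexpr std::size_t kHeaderSize = 16;  // mirrors the 16-byte chunk header above

// Returns true and copies the payload out only if the declared size is consistent.
bool parse_chunk(uint32_t chunk_size, const std::vector<uint8_t>& payload_bytes,
                 std::vector<uint8_t>* out) {
  const uint64_t payload_size = payload_bytes.size();   // 64-bit to avoid overflow
  if (chunk_size < payload_size + kHeaderSize)           // reject inconsistent headers
    return false;
  *out = payload_bytes;
  return true;
}

int main() {
  std::vector<uint8_t> payload(32, 0xAB), out;
  std::printf("ok=%d\n", parse_chunk(48, payload, &out));   // 32 + 16 == 48 -> accepted
  std::printf("ok=%d\n", parse_chunk(40, payload, &out));   // too small -> rejected
  return 0;
}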
null
null
null
null
72,524
26,335
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
26,335
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "printing/print_job_constants.h" #include "printing/buildflags/buildflags.h" namespace printing { // True if this is the first preview request. const char kIsFirstRequest[] = "isFirstRequest"; // Unique ID sent along every preview request. const char kPreviewRequestID[] = "requestID"; // Unique ID to identify a print preview UI. const char kPreviewUIID[] = "previewUIID"; // Capabilities option. Contains the capabilities in CDD format. const char kSettingCapabilities[] = "capabilities"; // Print using cloud print: true if selected, false if not. const char kSettingCloudPrintId[] = "cloudPrintID"; // Print using cloud print dialog: true if selected, false if not. const char kSettingCloudPrintDialog[] = "printWithCloudPrint"; // Print job setting 'collate'. const char kSettingCollate[] = "collate"; // Print out color: true for color, false for grayscale. const char kSettingColor[] = "color"; // Default to color on or not. const char kSettingSetColorAsDefault[] = "setColorAsDefault"; // Key that specifies the height of the content area of the page. const char kSettingContentHeight[] = "contentHeight"; // Key that specifies the width of the content area of the page. const char kSettingContentWidth[] = "contentWidth"; // Number of copies. const char kSettingCopies[] = "copies"; // Device name: Unique printer identifier. const char kSettingDeviceName[] = "deviceName"; // Option to disable scaling. True if scaling is disabled else false. const char kSettingDisableScaling[] = "disableScaling"; // Horizontal DPI const char kSettingDpiHorizontal[] = "dpiHorizontal"; // Vertical DPI const char kSettingDpiVertical[] = "dpiVertical"; // Scaling value required to fit the document to page. const char kSettingFitToPageScaling[] = "fitToPageScaling"; // Print job duplex mode. const char kSettingDuplexMode[] = "duplex"; // Option to fit source page contents to printer paper size: true if // selected else false. const char kSettingFitToPageEnabled[] = "fitToPageEnabled"; // True, when a new set of draft preview data is required. const char kSettingGenerateDraftData[] = "generateDraftData"; // Option to print headers and Footers: true if selected, false if not. const char kSettingHeaderFooterEnabled[] = "headerFooterEnabled"; // Interstice or gap between different header footer components. Hardcoded to // about 0.5cm, match the value in PrintSettings::SetPrinterPrintableArea. const float kSettingHeaderFooterInterstice = 14.2f; // Key that specifies the date of the page that will be printed in the headers // and footers. const char kSettingHeaderFooterDate[] = "date"; // Key that specifies the title of the page that will be printed in the headers // and footers. const char kSettingHeaderFooterTitle[] = "title"; // Key that specifies the URL of the page that will be printed in the headers // and footers. const char kSettingHeaderFooterURL[] = "url"; // Page orientation: true for landscape, false for portrait. const char kSettingLandscape[] = "landscape"; // Key that specifies the requested media size. const char kSettingMediaSize[] = "mediaSize"; // Key that specifies the requested media height in microns. const char kSettingMediaSizeHeightMicrons[] = "height_microns"; // Key that specifies the requested media width in microns. 
const char kSettingMediaSizeWidthMicrons[] = "width_microns"; // Key that specifies the requested media platform specific vendor id. const char kSettingMediaSizeVendorId[] = "vendor_id"; // Key that specifies whether the requested media is a default one. const char kSettingMediaSizeIsDefault[] = "is_default"; // Key that specifies the bottom margin of the page. const char kSettingMarginBottom[] = "marginBottom"; // Key that specifies the left margin of the page. const char kSettingMarginLeft[] = "marginLeft"; // Key that specifies the right margin of the page. const char kSettingMarginRight[] = "marginRight"; // Key that specifies the top margin of the page. const char kSettingMarginTop[] = "marginTop"; // Key that specifies the dictionary of custom margins as set by the user. const char kSettingMarginsCustom[] = "marginsCustom"; // Key that specifies the type of margins to use. Value is an int from the // MarginType enum. const char kSettingMarginsType[] = "marginsType"; // Number of pages to print. const char kSettingPreviewPageCount[] = "pageCount"; // A page range. const char kSettingPageRange[] = "pageRange"; // The first page of a page range. (1-based) const char kSettingPageRangeFrom[] = "from"; // The last page of a page range. (1-based) const char kSettingPageRangeTo[] = "to"; // Page size of document to print. const char kSettingPageWidth[] = "pageWidth"; const char kSettingPageHeight[] = "pageHeight"; const char kSettingPreviewModifiable[] = "previewModifiable"; // Keys that specifies the printable area details. const char kSettingPrintableAreaX[] = "printableAreaX"; const char kSettingPrintableAreaY[] = "printableAreaY"; const char kSettingPrintableAreaWidth[] = "printableAreaWidth"; const char kSettingPrintableAreaHeight[] = "printableAreaHeight"; // Printer name. const char kSettingPrinterName[] = "printerName"; // Printer description. const char kSettingPrinterDescription[] = "printerDescription"; // Additional printer options. const char kSettingPrinterOptions[] = "printerOptions"; // Print to PDF option: true if selected, false if not. const char kSettingPrintToPDF[] = "printToPDF"; // Print using Privet option: true if destination is a Privet printer, false if // not. const char kSettingPrintWithPrivet[] = "printWithPrivet"; // Print using extension option: true if destination is an extension printer, // false if not. const char kSettingPrintWithExtension[] = "printWithExtension"; // Scaling factor const char kSettingScaleFactor[] = "scaleFactor"; // Whether to rasterize the PDF for printing. const char kSettingRasterizePdf[] = "rasterizePDF"; // Ticket option. Contains the ticket in CJT format. const char kSettingTicket[] = "ticket"; // Whether to print CSS backgrounds. const char kSettingShouldPrintBackgrounds[] = "shouldPrintBackgrounds"; // Whether to print selection only. const char kSettingShouldPrintSelectionOnly[] = "shouldPrintSelectionOnly"; #if BUILDFLAG(ENABLE_BASIC_PRINTING) // Whether to print using the system dialog. const char kSettingShowSystemDialog[] = "showSystemDialog"; #endif // Indices used to represent first preview page and complete preview document. const int FIRST_PAGE_INDEX = 0; const int COMPLETE_PREVIEW_DOCUMENT_INDEX = -1; // Whether to show PDF in view provided by OS. Implemented for MacOS only. 
const char kSettingOpenPDFInPreview[] = "OpenPDFInPreview"; #if defined (USE_CUPS) const char kBlack[] = "Black"; const char kCMYK[] = "CMYK"; const char kKCMY[] = "KCMY"; const char kCMY_K[] = "CMY+K"; const char kCMY[] = "CMY"; const char kColor[] = "Color"; const char kFullColor[] = "FullColor"; const char kGray[] = "Gray"; const char kGrayscale[] = "Grayscale"; const char kGreyscale[] = "Greyscale"; const char kMono[] = "Mono"; const char kMonochrome[] = "Monochrome"; const char kNormal[] = "Normal"; const char kNormalGray[] = "Normal.Gray"; const char kRGB[] = "RGB"; const char kRGBA[] = "RGBA"; const char kRGB16[] = "RGB16"; #endif } // namespace printing
null
null
null
null
23,198
34,253
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
199,248
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Xilinx Video IP Composite Device * * Copyright (C) 2013-2015 Ideas on Board * Copyright (C) 2013-2015 Xilinx, Inc. * * Contacts: Hyun Kwon <hyun.kwon@xilinx.com> * Laurent Pinchart <laurent.pinchart@ideasonboard.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/list.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <media/v4l2-async.h> #include <media/v4l2-common.h> #include <media/v4l2-device.h> #include <media/v4l2-of.h> #include "xilinx-dma.h" #include "xilinx-vipp.h" #define XVIPP_DMA_S2MM 0 #define XVIPP_DMA_MM2S 1 /** * struct xvip_graph_entity - Entity in the video graph * @list: list entry in a graph entities list * @node: the entity's DT node * @entity: media entity, from the corresponding V4L2 subdev * @asd: subdev asynchronous registration information * @subdev: V4L2 subdev */ struct xvip_graph_entity { struct list_head list; struct device_node *node; struct media_entity *entity; struct v4l2_async_subdev asd; struct v4l2_subdev *subdev; }; /* ----------------------------------------------------------------------------- * Graph Management */ static struct xvip_graph_entity * xvip_graph_find_entity(struct xvip_composite_device *xdev, const struct device_node *node) { struct xvip_graph_entity *entity; list_for_each_entry(entity, &xdev->entities, list) { if (entity->node == node) return entity; } return NULL; } static int xvip_graph_build_one(struct xvip_composite_device *xdev, struct xvip_graph_entity *entity) { u32 link_flags = MEDIA_LNK_FL_ENABLED; struct media_entity *local = entity->entity; struct media_entity *remote; struct media_pad *local_pad; struct media_pad *remote_pad; struct xvip_graph_entity *ent; struct v4l2_of_link link; struct device_node *ep = NULL; struct device_node *next; int ret = 0; dev_dbg(xdev->dev, "creating links for entity %s\n", local->name); while (1) { /* Get the next endpoint and parse its link. */ next = of_graph_get_next_endpoint(entity->node, ep); if (next == NULL) break; of_node_put(ep); ep = next; dev_dbg(xdev->dev, "processing endpoint %s\n", ep->full_name); ret = v4l2_of_parse_link(ep, &link); if (ret < 0) { dev_err(xdev->dev, "failed to parse link for %s\n", ep->full_name); continue; } /* Skip sink ports, they will be processed from the other end of * the link. */ if (link.local_port >= local->num_pads) { dev_err(xdev->dev, "invalid port number %u on %s\n", link.local_port, link.local_node->full_name); v4l2_of_put_link(&link); ret = -EINVAL; break; } local_pad = &local->pads[link.local_port]; if (local_pad->flags & MEDIA_PAD_FL_SINK) { dev_dbg(xdev->dev, "skipping sink port %s:%u\n", link.local_node->full_name, link.local_port); v4l2_of_put_link(&link); continue; } /* Skip DMA engines, they will be processed separately. */ if (link.remote_node == xdev->dev->of_node) { dev_dbg(xdev->dev, "skipping DMA port %s:%u\n", link.local_node->full_name, link.local_port); v4l2_of_put_link(&link); continue; } /* Find the remote entity. 
*/ ent = xvip_graph_find_entity(xdev, link.remote_node); if (ent == NULL) { dev_err(xdev->dev, "no entity found for %s\n", link.remote_node->full_name); v4l2_of_put_link(&link); ret = -ENODEV; break; } remote = ent->entity; if (link.remote_port >= remote->num_pads) { dev_err(xdev->dev, "invalid port number %u on %s\n", link.remote_port, link.remote_node->full_name); v4l2_of_put_link(&link); ret = -EINVAL; break; } remote_pad = &remote->pads[link.remote_port]; v4l2_of_put_link(&link); /* Create the media link. */ dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n", local->name, local_pad->index, remote->name, remote_pad->index); ret = media_create_pad_link(local, local_pad->index, remote, remote_pad->index, link_flags); if (ret < 0) { dev_err(xdev->dev, "failed to create %s:%u -> %s:%u link\n", local->name, local_pad->index, remote->name, remote_pad->index); break; } } of_node_put(ep); return ret; } static struct xvip_dma * xvip_graph_find_dma(struct xvip_composite_device *xdev, unsigned int port) { struct xvip_dma *dma; list_for_each_entry(dma, &xdev->dmas, list) { if (dma->port == port) return dma; } return NULL; } static int xvip_graph_build_dma(struct xvip_composite_device *xdev) { u32 link_flags = MEDIA_LNK_FL_ENABLED; struct device_node *node = xdev->dev->of_node; struct media_entity *source; struct media_entity *sink; struct media_pad *source_pad; struct media_pad *sink_pad; struct xvip_graph_entity *ent; struct v4l2_of_link link; struct device_node *ep = NULL; struct device_node *next; struct xvip_dma *dma; int ret = 0; dev_dbg(xdev->dev, "creating links for DMA engines\n"); while (1) { /* Get the next endpoint and parse its link. */ next = of_graph_get_next_endpoint(node, ep); if (next == NULL) break; of_node_put(ep); ep = next; dev_dbg(xdev->dev, "processing endpoint %s\n", ep->full_name); ret = v4l2_of_parse_link(ep, &link); if (ret < 0) { dev_err(xdev->dev, "failed to parse link for %s\n", ep->full_name); continue; } /* Find the DMA engine. */ dma = xvip_graph_find_dma(xdev, link.local_port); if (dma == NULL) { dev_err(xdev->dev, "no DMA engine found for port %u\n", link.local_port); v4l2_of_put_link(&link); ret = -EINVAL; break; } dev_dbg(xdev->dev, "creating link for DMA engine %s\n", dma->video.name); /* Find the remote entity. */ ent = xvip_graph_find_entity(xdev, link.remote_node); if (ent == NULL) { dev_err(xdev->dev, "no entity found for %s\n", link.remote_node->full_name); v4l2_of_put_link(&link); ret = -ENODEV; break; } if (link.remote_port >= ent->entity->num_pads) { dev_err(xdev->dev, "invalid port number %u on %s\n", link.remote_port, link.remote_node->full_name); v4l2_of_put_link(&link); ret = -EINVAL; break; } if (dma->pad.flags & MEDIA_PAD_FL_SOURCE) { source = &dma->video.entity; source_pad = &dma->pad; sink = ent->entity; sink_pad = &sink->pads[link.remote_port]; } else { source = ent->entity; source_pad = &source->pads[link.remote_port]; sink = &dma->video.entity; sink_pad = &dma->pad; } v4l2_of_put_link(&link); /* Create the media link. 
*/ dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n", source->name, source_pad->index, sink->name, sink_pad->index); ret = media_create_pad_link(source, source_pad->index, sink, sink_pad->index, link_flags); if (ret < 0) { dev_err(xdev->dev, "failed to create %s:%u -> %s:%u link\n", source->name, source_pad->index, sink->name, sink_pad->index); break; } } of_node_put(ep); return ret; } static int xvip_graph_notify_complete(struct v4l2_async_notifier *notifier) { struct xvip_composite_device *xdev = container_of(notifier, struct xvip_composite_device, notifier); struct xvip_graph_entity *entity; int ret; dev_dbg(xdev->dev, "notify complete, all subdevs registered\n"); /* Create links for every entity. */ list_for_each_entry(entity, &xdev->entities, list) { ret = xvip_graph_build_one(xdev, entity); if (ret < 0) return ret; } /* Create links for DMA channels. */ ret = xvip_graph_build_dma(xdev); if (ret < 0) return ret; ret = v4l2_device_register_subdev_nodes(&xdev->v4l2_dev); if (ret < 0) dev_err(xdev->dev, "failed to register subdev nodes\n"); return media_device_register(&xdev->media_dev); } static int xvip_graph_notify_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_subdev *asd) { struct xvip_composite_device *xdev = container_of(notifier, struct xvip_composite_device, notifier); struct xvip_graph_entity *entity; /* Locate the entity corresponding to the bound subdev and store the * subdev pointer. */ list_for_each_entry(entity, &xdev->entities, list) { if (entity->node != subdev->dev->of_node) continue; if (entity->subdev) { dev_err(xdev->dev, "duplicate subdev for node %s\n", entity->node->full_name); return -EINVAL; } dev_dbg(xdev->dev, "subdev %s bound\n", subdev->name); entity->entity = &subdev->entity; entity->subdev = subdev; return 0; } dev_err(xdev->dev, "no entity for subdev %s\n", subdev->name); return -EINVAL; } static int xvip_graph_parse_one(struct xvip_composite_device *xdev, struct device_node *node) { struct xvip_graph_entity *entity; struct device_node *remote; struct device_node *ep = NULL; int ret = 0; dev_dbg(xdev->dev, "parsing node %s\n", node->full_name); while (1) { ep = of_graph_get_next_endpoint(node, ep); if (ep == NULL) break; dev_dbg(xdev->dev, "handling endpoint %s\n", ep->full_name); remote = of_graph_get_remote_port_parent(ep); if (remote == NULL) { ret = -EINVAL; break; } /* Skip entities that we have already processed. */ if (remote == xdev->dev->of_node || xvip_graph_find_entity(xdev, remote)) { of_node_put(remote); continue; } entity = devm_kzalloc(xdev->dev, sizeof(*entity), GFP_KERNEL); if (entity == NULL) { of_node_put(remote); ret = -ENOMEM; break; } entity->node = remote; entity->asd.match_type = V4L2_ASYNC_MATCH_OF; entity->asd.match.of.node = remote; list_add_tail(&entity->list, &xdev->entities); xdev->num_subdevs++; } of_node_put(ep); return ret; } static int xvip_graph_parse(struct xvip_composite_device *xdev) { struct xvip_graph_entity *entity; int ret; /* * Walk the links to parse the full graph. Start by parsing the * composite node and then parse entities in turn. The list_for_each * loop will handle entities added at the end of the list while walking * the links. 
*/ ret = xvip_graph_parse_one(xdev, xdev->dev->of_node); if (ret < 0) return 0; list_for_each_entry(entity, &xdev->entities, list) { ret = xvip_graph_parse_one(xdev, entity->node); if (ret < 0) break; } return ret; } static int xvip_graph_dma_init_one(struct xvip_composite_device *xdev, struct device_node *node) { struct xvip_dma *dma; enum v4l2_buf_type type; const char *direction; unsigned int index; int ret; ret = of_property_read_string(node, "direction", &direction); if (ret < 0) return ret; if (strcmp(direction, "input") == 0) type = V4L2_BUF_TYPE_VIDEO_CAPTURE; else if (strcmp(direction, "output") == 0) type = V4L2_BUF_TYPE_VIDEO_OUTPUT; else return -EINVAL; of_property_read_u32(node, "reg", &index); dma = devm_kzalloc(xdev->dev, sizeof(*dma), GFP_KERNEL); if (dma == NULL) return -ENOMEM; ret = xvip_dma_init(xdev, dma, type, index); if (ret < 0) { dev_err(xdev->dev, "%s initialization failed\n", node->full_name); return ret; } list_add_tail(&dma->list, &xdev->dmas); xdev->v4l2_caps |= type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? V4L2_CAP_VIDEO_CAPTURE : V4L2_CAP_VIDEO_OUTPUT; return 0; } static int xvip_graph_dma_init(struct xvip_composite_device *xdev) { struct device_node *ports; struct device_node *port; int ret; ports = of_get_child_by_name(xdev->dev->of_node, "ports"); if (ports == NULL) { dev_err(xdev->dev, "ports node not present\n"); return -EINVAL; } for_each_child_of_node(ports, port) { ret = xvip_graph_dma_init_one(xdev, port); if (ret < 0) { of_node_put(port); return ret; } } return 0; } static void xvip_graph_cleanup(struct xvip_composite_device *xdev) { struct xvip_graph_entity *entityp; struct xvip_graph_entity *entity; struct xvip_dma *dmap; struct xvip_dma *dma; v4l2_async_notifier_unregister(&xdev->notifier); list_for_each_entry_safe(entity, entityp, &xdev->entities, list) { of_node_put(entity->node); list_del(&entity->list); } list_for_each_entry_safe(dma, dmap, &xdev->dmas, list) { xvip_dma_cleanup(dma); list_del(&dma->list); } } static int xvip_graph_init(struct xvip_composite_device *xdev) { struct xvip_graph_entity *entity; struct v4l2_async_subdev **subdevs = NULL; unsigned int num_subdevs; unsigned int i; int ret; /* Init the DMA channels. */ ret = xvip_graph_dma_init(xdev); if (ret < 0) { dev_err(xdev->dev, "DMA initialization failed\n"); goto done; } /* Parse the graph to extract a list of subdevice DT nodes. */ ret = xvip_graph_parse(xdev); if (ret < 0) { dev_err(xdev->dev, "graph parsing failed\n"); goto done; } if (!xdev->num_subdevs) { dev_err(xdev->dev, "no subdev found in graph\n"); goto done; } /* Register the subdevices notifier. 
*/ num_subdevs = xdev->num_subdevs; subdevs = devm_kzalloc(xdev->dev, sizeof(*subdevs) * num_subdevs, GFP_KERNEL); if (subdevs == NULL) { ret = -ENOMEM; goto done; } i = 0; list_for_each_entry(entity, &xdev->entities, list) subdevs[i++] = &entity->asd; xdev->notifier.subdevs = subdevs; xdev->notifier.num_subdevs = num_subdevs; xdev->notifier.bound = xvip_graph_notify_bound; xdev->notifier.complete = xvip_graph_notify_complete; ret = v4l2_async_notifier_register(&xdev->v4l2_dev, &xdev->notifier); if (ret < 0) { dev_err(xdev->dev, "notifier registration failed\n"); goto done; } ret = 0; done: if (ret < 0) xvip_graph_cleanup(xdev); return ret; } /* ----------------------------------------------------------------------------- * Media Controller and V4L2 */ static void xvip_composite_v4l2_cleanup(struct xvip_composite_device *xdev) { v4l2_device_unregister(&xdev->v4l2_dev); media_device_unregister(&xdev->media_dev); media_device_cleanup(&xdev->media_dev); } static int xvip_composite_v4l2_init(struct xvip_composite_device *xdev) { int ret; xdev->media_dev.dev = xdev->dev; strlcpy(xdev->media_dev.model, "Xilinx Video Composite Device", sizeof(xdev->media_dev.model)); xdev->media_dev.hw_revision = 0; media_device_init(&xdev->media_dev); xdev->v4l2_dev.mdev = &xdev->media_dev; ret = v4l2_device_register(xdev->dev, &xdev->v4l2_dev); if (ret < 0) { dev_err(xdev->dev, "V4L2 device registration failed (%d)\n", ret); media_device_cleanup(&xdev->media_dev); return ret; } return 0; } /* ----------------------------------------------------------------------------- * Platform Device Driver */ static int xvip_composite_probe(struct platform_device *pdev) { struct xvip_composite_device *xdev; int ret; xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); if (!xdev) return -ENOMEM; xdev->dev = &pdev->dev; INIT_LIST_HEAD(&xdev->entities); INIT_LIST_HEAD(&xdev->dmas); ret = xvip_composite_v4l2_init(xdev); if (ret < 0) return ret; ret = xvip_graph_init(xdev); if (ret < 0) goto error; platform_set_drvdata(pdev, xdev); dev_info(xdev->dev, "device registered\n"); return 0; error: xvip_composite_v4l2_cleanup(xdev); return ret; } static int xvip_composite_remove(struct platform_device *pdev) { struct xvip_composite_device *xdev = platform_get_drvdata(pdev); xvip_graph_cleanup(xdev); xvip_composite_v4l2_cleanup(xdev); return 0; } static const struct of_device_id xvip_composite_of_id_table[] = { { .compatible = "xlnx,video" }, { } }; MODULE_DEVICE_TABLE(of, xvip_composite_of_id_table); static struct platform_driver xvip_composite_driver = { .driver = { .name = "xilinx-video", .of_match_table = xvip_composite_of_id_table, }, .probe = xvip_composite_probe, .remove = xvip_composite_remove, }; module_platform_driver(xvip_composite_driver); MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); MODULE_DESCRIPTION("Xilinx Video IP Composite Driver"); MODULE_LICENSE("GPL v2");
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 107,595

file_name: 37,864
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
unique_id: 202,859
project: linux
target: 0
repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
/* * config.c * * Helper functions for parsing config items. * Originally copied from GIT source. * * Copyright (C) Linus Torvalds, 2005 * Copyright (C) Johannes Schindelin, 2005 * */ #include "util.h" #include "cache.h" #include <subcmd/exec-cmd.h> #include "util/hist.h" /* perf_hist_config */ #include "util/llvm-utils.h" /* perf_llvm_config */ #include "config.h" #define MAXNAME (256) #define DEBUG_CACHE_DIR ".debug" char buildid_dir[MAXPATHLEN]; /* root dir for buildid, binary cache */ static FILE *config_file; static const char *config_file_name; static int config_linenr; static int config_file_eof; static struct perf_config_set *config_set; const char *config_exclusive_filename; static int get_next_char(void) { int c; FILE *f; c = '\n'; if ((f = config_file) != NULL) { c = fgetc(f); if (c == '\r') { /* DOS like systems */ c = fgetc(f); if (c != '\n') { ungetc(c, f); c = '\r'; } } if (c == '\n') config_linenr++; if (c == EOF) { config_file_eof = 1; c = '\n'; } } return c; } static char *parse_value(void) { static char value[1024]; int quote = 0, comment = 0, space = 0; size_t len = 0; for (;;) { int c = get_next_char(); if (len >= sizeof(value) - 1) return NULL; if (c == '\n') { if (quote) return NULL; value[len] = 0; return value; } if (comment) continue; if (isspace(c) && !quote) { space = 1; continue; } if (!quote) { if (c == ';' || c == '#') { comment = 1; continue; } } if (space) { if (len) value[len++] = ' '; space = 0; } if (c == '\\') { c = get_next_char(); switch (c) { case '\n': continue; case 't': c = '\t'; break; case 'b': c = '\b'; break; case 'n': c = '\n'; break; /* Some characters escape as themselves */ case '\\': case '"': break; /* Reject unknown escape sequences */ default: return NULL; } value[len++] = c; continue; } if (c == '"') { quote = 1-quote; continue; } value[len++] = c; } } static inline int iskeychar(int c) { return isalnum(c) || c == '-' || c == '_'; } static int get_value(config_fn_t fn, void *data, char *name, unsigned int len) { int c; char *value; /* Get the full name */ for (;;) { c = get_next_char(); if (config_file_eof) break; if (!iskeychar(c)) break; name[len++] = c; if (len >= MAXNAME) return -1; } name[len] = 0; while (c == ' ' || c == '\t') c = get_next_char(); value = NULL; if (c != '\n') { if (c != '=') return -1; value = parse_value(); if (!value) return -1; } return fn(name, value, data); } static int get_extended_base_var(char *name, int baselen, int c) { do { if (c == '\n') return -1; c = get_next_char(); } while (isspace(c)); /* We require the format to be '[base "extension"]' */ if (c != '"') return -1; name[baselen++] = '.'; for (;;) { int ch = get_next_char(); if (ch == '\n') return -1; if (ch == '"') break; if (ch == '\\') { ch = get_next_char(); if (ch == '\n') return -1; } name[baselen++] = ch; if (baselen > MAXNAME / 2) return -1; } /* Final ']' */ if (get_next_char() != ']') return -1; return baselen; } static int get_base_var(char *name) { int baselen = 0; for (;;) { int c = get_next_char(); if (config_file_eof) return -1; if (c == ']') return baselen; if (isspace(c)) return get_extended_base_var(name, baselen, c); if (!iskeychar(c) && c != '.') return -1; if (baselen > MAXNAME / 2) return -1; name[baselen++] = tolower(c); } } static int perf_parse_file(config_fn_t fn, void *data) { int comment = 0; int baselen = 0; static char var[MAXNAME]; /* U+FEFF Byte Order Mark in UTF8 */ static const unsigned char *utf8_bom = (unsigned char *) "\xef\xbb\xbf"; const unsigned char *bomptr = utf8_bom; for (;;) { int line, c = 
get_next_char(); if (bomptr && *bomptr) { /* We are at the file beginning; skip UTF8-encoded BOM * if present. Sane editors won't put this in on their * own, but e.g. Windows Notepad will do it happily. */ if ((unsigned char) c == *bomptr) { bomptr++; continue; } else { /* Do not tolerate partial BOM. */ if (bomptr != utf8_bom) break; /* No BOM at file beginning. Cool. */ bomptr = NULL; } } if (c == '\n') { if (config_file_eof) return 0; comment = 0; continue; } if (comment || isspace(c)) continue; if (c == '#' || c == ';') { comment = 1; continue; } if (c == '[') { baselen = get_base_var(var); if (baselen <= 0) break; var[baselen++] = '.'; var[baselen] = 0; continue; } if (!isalpha(c)) break; var[baselen] = tolower(c); /* * The get_value function might or might not reach the '\n', * so saving the current line number for error reporting. */ line = config_linenr; if (get_value(fn, data, var, baselen+1) < 0) { config_linenr = line; break; } } pr_err("bad config file line %d in %s\n", config_linenr, config_file_name); return -1; } static int parse_unit_factor(const char *end, unsigned long *val) { if (!*end) return 1; else if (!strcasecmp(end, "k")) { *val *= 1024; return 1; } else if (!strcasecmp(end, "m")) { *val *= 1024 * 1024; return 1; } else if (!strcasecmp(end, "g")) { *val *= 1024 * 1024 * 1024; return 1; } return 0; } static int perf_parse_llong(const char *value, long long *ret) { if (value && *value) { char *end; long long val = strtoll(value, &end, 0); unsigned long factor = 1; if (!parse_unit_factor(end, &factor)) return 0; *ret = val * factor; return 1; } return 0; } static int perf_parse_long(const char *value, long *ret) { if (value && *value) { char *end; long val = strtol(value, &end, 0); unsigned long factor = 1; if (!parse_unit_factor(end, &factor)) return 0; *ret = val * factor; return 1; } return 0; } static void die_bad_config(const char *name) { if (config_file_name) die("bad config value for '%s' in %s", name, config_file_name); die("bad config value for '%s'", name); } u64 perf_config_u64(const char *name, const char *value) { long long ret = 0; if (!perf_parse_llong(value, &ret)) die_bad_config(name); return (u64) ret; } int perf_config_int(const char *name, const char *value) { long ret = 0; if (!perf_parse_long(value, &ret)) die_bad_config(name); return ret; } static int perf_config_bool_or_int(const char *name, const char *value, int *is_bool) { *is_bool = 1; if (!value) return 1; if (!*value) return 0; if (!strcasecmp(value, "true") || !strcasecmp(value, "yes") || !strcasecmp(value, "on")) return 1; if (!strcasecmp(value, "false") || !strcasecmp(value, "no") || !strcasecmp(value, "off")) return 0; *is_bool = 0; return perf_config_int(name, value); } int perf_config_bool(const char *name, const char *value) { int discard; return !!perf_config_bool_or_int(name, value, &discard); } static const char *perf_config_dirname(const char *name, const char *value) { if (!name) return NULL; return value; } static int perf_buildid_config(const char *var, const char *value) { /* same dir for all commands */ if (!strcmp(var, "buildid.dir")) { const char *dir = perf_config_dirname(var, value); if (!dir) { pr_err("Invalid buildid directory!\n"); return -1; } strncpy(buildid_dir, dir, MAXPATHLEN-1); buildid_dir[MAXPATHLEN-1] = '\0'; } return 0; } static int perf_default_core_config(const char *var __maybe_unused, const char *value __maybe_unused) { /* Add other config variables here. 
*/ return 0; } static int perf_ui_config(const char *var, const char *value) { /* Add other config variables here. */ if (!strcmp(var, "ui.show-headers")) symbol_conf.show_hist_headers = perf_config_bool(var, value); return 0; } int perf_default_config(const char *var, const char *value, void *dummy __maybe_unused) { if (!prefixcmp(var, "core.")) return perf_default_core_config(var, value); if (!prefixcmp(var, "hist.")) return perf_hist_config(var, value); if (!prefixcmp(var, "ui.")) return perf_ui_config(var, value); if (!prefixcmp(var, "call-graph.")) return perf_callchain_config(var, value); if (!prefixcmp(var, "llvm.")) return perf_llvm_config(var, value); if (!prefixcmp(var, "buildid.")) return perf_buildid_config(var, value); /* Add other config variables here. */ return 0; } static int perf_config_from_file(config_fn_t fn, const char *filename, void *data) { int ret; FILE *f = fopen(filename, "r"); ret = -1; if (f) { config_file = f; config_file_name = filename; config_linenr = 1; config_file_eof = 0; ret = perf_parse_file(fn, data); fclose(f); config_file_name = NULL; } return ret; } const char *perf_etc_perfconfig(void) { static const char *system_wide; if (!system_wide) system_wide = system_path(ETC_PERFCONFIG); return system_wide; } static int perf_env_bool(const char *k, int def) { const char *v = getenv(k); return v ? perf_config_bool(k, v) : def; } static int perf_config_system(void) { return !perf_env_bool("PERF_CONFIG_NOSYSTEM", 0); } static int perf_config_global(void) { return !perf_env_bool("PERF_CONFIG_NOGLOBAL", 0); } static struct perf_config_section *find_section(struct list_head *sections, const char *section_name) { struct perf_config_section *section; list_for_each_entry(section, sections, node) if (!strcmp(section->name, section_name)) return section; return NULL; } static struct perf_config_item *find_config_item(const char *name, struct perf_config_section *section) { struct perf_config_item *item; list_for_each_entry(item, &section->items, node) if (!strcmp(item->name, name)) return item; return NULL; } static struct perf_config_section *add_section(struct list_head *sections, const char *section_name) { struct perf_config_section *section = zalloc(sizeof(*section)); if (!section) return NULL; INIT_LIST_HEAD(&section->items); section->name = strdup(section_name); if (!section->name) { pr_debug("%s: strdup failed\n", __func__); free(section); return NULL; } list_add_tail(&section->node, sections); return section; } static struct perf_config_item *add_config_item(struct perf_config_section *section, const char *name) { struct perf_config_item *item = zalloc(sizeof(*item)); if (!item) return NULL; item->name = strdup(name); if (!item->name) { pr_debug("%s: strdup failed\n", __func__); free(item); return NULL; } list_add_tail(&item->node, &section->items); return item; } static int set_value(struct perf_config_item *item, const char *value) { char *val = strdup(value); if (!val) return -1; zfree(&item->value); item->value = val; return 0; } static int collect_config(const char *var, const char *value, void *perf_config_set) { int ret = -1; char *ptr, *key; char *section_name, *name; struct perf_config_section *section = NULL; struct perf_config_item *item = NULL; struct perf_config_set *set = perf_config_set; struct list_head *sections; if (set == NULL) return -1; sections = &set->sections; key = ptr = strdup(var); if (!key) { pr_debug("%s: strdup failed\n", __func__); return -1; } section_name = strsep(&ptr, "."); name = ptr; if (name == NULL || value == NULL) 
goto out_free; section = find_section(sections, section_name); if (!section) { section = add_section(sections, section_name); if (!section) goto out_free; } item = find_config_item(name, section); if (!item) { item = add_config_item(section, name); if (!item) goto out_free; } /* perf_config_set can contain both user and system config items. * So we should know where each value is from. * The classification would be needed when a particular config file * is overwrited by setting feature i.e. set_config(). */ if (strcmp(config_file_name, perf_etc_perfconfig()) == 0) { section->from_system_config = true; item->from_system_config = true; } else { section->from_system_config = false; item->from_system_config = false; } ret = set_value(item, value); return ret; out_free: free(key); return -1; } int perf_config_set__collect(struct perf_config_set *set, const char *file_name, const char *var, const char *value) { config_file_name = file_name; return collect_config(var, value, set); } static int perf_config_set__init(struct perf_config_set *set) { int ret = -1; const char *home = NULL; /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */ if (config_exclusive_filename) return perf_config_from_file(collect_config, config_exclusive_filename, set); if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK)) { if (perf_config_from_file(collect_config, perf_etc_perfconfig(), set) < 0) goto out; } home = getenv("HOME"); if (perf_config_global() && home) { char *user_config = strdup(mkpath("%s/.perfconfig", home)); struct stat st; if (user_config == NULL) { warning("Not enough memory to process %s/.perfconfig, " "ignoring it.", home); goto out; } if (stat(user_config, &st) < 0) { if (errno == ENOENT) ret = 0; goto out_free; } ret = 0; if (st.st_uid && (st.st_uid != geteuid())) { warning("File %s not owned by current user or root, " "ignoring it.", user_config); goto out_free; } if (st.st_size) ret = perf_config_from_file(collect_config, user_config, set); out_free: free(user_config); } out: return ret; } struct perf_config_set *perf_config_set__new(void) { struct perf_config_set *set = zalloc(sizeof(*set)); if (set) { INIT_LIST_HEAD(&set->sections); if (perf_config_set__init(set) < 0) { perf_config_set__delete(set); set = NULL; } } return set; } int perf_config(config_fn_t fn, void *data) { int ret = 0; char key[BUFSIZ]; struct perf_config_section *section; struct perf_config_item *item; if (config_set == NULL) return -1; perf_config_set__for_each_entry(config_set, section, item) { char *value = item->value; if (value) { scnprintf(key, sizeof(key), "%s.%s", section->name, item->name); ret = fn(key, value, data); if (ret < 0) { pr_err("Error: wrong config key-value pair %s=%s\n", key, value); break; } } } return ret; } void perf_config__init(void) { if (config_set == NULL) config_set = perf_config_set__new(); } void perf_config__exit(void) { perf_config_set__delete(config_set); config_set = NULL; } void perf_config__refresh(void) { perf_config__exit(); perf_config__init(); } static void perf_config_item__delete(struct perf_config_item *item) { zfree(&item->name); zfree(&item->value); free(item); } static void perf_config_section__purge(struct perf_config_section *section) { struct perf_config_item *item, *tmp; list_for_each_entry_safe(item, tmp, &section->items, node) { list_del_init(&item->node); perf_config_item__delete(item); } } static void perf_config_section__delete(struct perf_config_section *section) { perf_config_section__purge(section); zfree(&section->name); 
free(section); } static void perf_config_set__purge(struct perf_config_set *set) { struct perf_config_section *section, *tmp; list_for_each_entry_safe(section, tmp, &set->sections, node) { list_del_init(&section->node); perf_config_section__delete(section); } } void perf_config_set__delete(struct perf_config_set *set) { if (set == NULL) return; perf_config_set__purge(set); free(set); } /* * Call this to report error for your variable that should not * get a boolean value (i.e. "[my] var" means "true"). */ int config_error_nonbool(const char *var) { return error("Missing value for '%s'", var); } void set_buildid_dir(const char *dir) { if (dir) scnprintf(buildid_dir, MAXPATHLEN-1, "%s", dir); /* default to $HOME/.debug */ if (buildid_dir[0] == '\0') { char *home = getenv("HOME"); if (home) { snprintf(buildid_dir, MAXPATHLEN-1, "%s/%s", home, DEBUG_CACHE_DIR); } else { strncpy(buildid_dir, DEBUG_CACHE_DIR, MAXPATHLEN-1); } buildid_dir[MAXPATHLEN-1] = '\0'; } /* for communicating with external commands */ setenv("PERF_BUILDID_DIR", buildid_dir, 1); }
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 111,206

file_name: 26,327
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
unique_id: 26,327
project: Chrome
target: 0
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "printing/page_setup.h" #include <algorithm> #include "base/logging.h" namespace printing { PageMargins::PageMargins() : header(0), footer(0), left(0), right(0), top(0), bottom(0) { } void PageMargins::Clear() { header = 0; footer = 0; left = 0; right = 0; top = 0; bottom = 0; } bool PageMargins::Equals(const PageMargins& rhs) const { return header == rhs.header && footer == rhs.footer && left == rhs.left && top == rhs.top && right == rhs.right && bottom == rhs.bottom; } PageSetup::PageSetup() { Clear(); } PageSetup::PageSetup(const PageSetup& other) = default; PageSetup::~PageSetup() = default; void PageSetup::Clear() { physical_size_.SetSize(0, 0); printable_area_.SetRect(0, 0, 0, 0); overlay_area_.SetRect(0, 0, 0, 0); content_area_.SetRect(0, 0, 0, 0); effective_margins_.Clear(); text_height_ = 0; forced_margins_ = false; } bool PageSetup::Equals(const PageSetup& rhs) const { return physical_size_ == rhs.physical_size_ && printable_area_ == rhs.printable_area_ && overlay_area_ == rhs.overlay_area_ && content_area_ == rhs.content_area_ && effective_margins_.Equals(rhs.effective_margins_) && requested_margins_.Equals(rhs.requested_margins_) && text_height_ == rhs.text_height_; } void PageSetup::Init(const gfx::Size& physical_size, const gfx::Rect& printable_area, int text_height) { DCHECK_LE(printable_area.right(), physical_size.width()); // I've seen this assert triggers on Canon GP160PF PCL 5e and HP LaserJet 5. // Since we don't know the dpi here, just disable the check. // DCHECK_LE(printable_area.bottom(), physical_size.height()); DCHECK_GE(printable_area.x(), 0); DCHECK_GE(printable_area.y(), 0); DCHECK_GE(text_height, 0); physical_size_ = physical_size; printable_area_ = printable_area; text_height_ = text_height; SetRequestedMarginsAndCalculateSizes(requested_margins_); } void PageSetup::SetRequestedMargins(const PageMargins& requested_margins) { forced_margins_ = false; SetRequestedMarginsAndCalculateSizes(requested_margins); } void PageSetup::ForceRequestedMargins(const PageMargins& requested_margins) { forced_margins_ = true; SetRequestedMarginsAndCalculateSizes(requested_margins); } void PageSetup::FlipOrientation() { if (physical_size_.width() && physical_size_.height()) { gfx::Size new_size(physical_size_.height(), physical_size_.width()); int new_y = physical_size_.width() - (printable_area_.width() + printable_area_.x()); gfx::Rect new_printable_area(printable_area_.y(), new_y, printable_area_.height(), printable_area_.width()); Init(new_size, new_printable_area, text_height_); } } void PageSetup::SetRequestedMarginsAndCalculateSizes( const PageMargins& requested_margins) { requested_margins_ = requested_margins; if (physical_size_.width() && physical_size_.height()) { if (forced_margins_) CalculateSizesWithinRect(gfx::Rect(physical_size_), 0); else CalculateSizesWithinRect(printable_area_, text_height_); } } void PageSetup::CalculateSizesWithinRect(const gfx::Rect& bounds, int text_height) { // Calculate the effective margins. The tricky part. 
effective_margins_.header = std::max(requested_margins_.header, bounds.y()); effective_margins_.footer = std::max(requested_margins_.footer, physical_size_.height() - bounds.bottom()); effective_margins_.left = std::max(requested_margins_.left, bounds.x()); effective_margins_.top = std::max(std::max(requested_margins_.top, bounds.y()), effective_margins_.header + text_height); effective_margins_.right = std::max(requested_margins_.right, physical_size_.width() - bounds.right()); effective_margins_.bottom = std::max(std::max(requested_margins_.bottom, physical_size_.height() - bounds.bottom()), effective_margins_.footer + text_height); // Calculate the overlay area. If the margins are excessive, the overlay_area // size will be (0, 0). overlay_area_.set_x(effective_margins_.left); overlay_area_.set_y(effective_margins_.header); overlay_area_.set_width(std::max(0, physical_size_.width() - effective_margins_.right - overlay_area_.x())); overlay_area_.set_height(std::max(0, physical_size_.height() - effective_margins_.footer - overlay_area_.y())); // Calculate the content area. If the margins are excessive, the content_area // size will be (0, 0). content_area_.set_x(effective_margins_.left); content_area_.set_y(effective_margins_.top); content_area_.set_width(std::max(0, physical_size_.width() - effective_margins_.right - content_area_.x())); content_area_.set_height(std::max(0, physical_size_.height() - effective_margins_.bottom - content_area_.y())); } } // namespace printing
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 23,190

file_name: 26,097
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
unique_id: 26,097
project: Chrome
target: 0
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "extensions/browser/api/socket/socket.h" #include "base/bind.h" #include "base/lazy_instance.h" #include "extensions/browser/api/api_resource_manager.h" #include "net/base/address_list.h" #include "net/base/io_buffer.h" #include "net/base/ip_address.h" #include "net/base/ip_endpoint.h" #include "net/base/net_errors.h" #include "net/socket/socket.h" namespace extensions { const char kSocketTypeNotSupported[] = "Socket type does not support this API"; static base::LazyInstance< BrowserContextKeyedAPIFactory<ApiResourceManager<Socket>>>::DestructorAtExit g_factory = LAZY_INSTANCE_INITIALIZER; // static template <> BrowserContextKeyedAPIFactory<ApiResourceManager<Socket> >* ApiResourceManager<Socket>::GetFactoryInstance() { return g_factory.Pointer(); } Socket::Socket(const std::string& owner_extension_id) : ApiResource(owner_extension_id), is_connected_(false) {} Socket::~Socket() { // Derived destructors should make sure the socket has been closed. DCHECK(!is_connected_); } void Socket::Write(scoped_refptr<net::IOBuffer> io_buffer, int byte_count, const CompletionCallback& callback) { DCHECK(!callback.is_null()); write_queue_.push(WriteRequest(io_buffer, byte_count, callback)); WriteData(); } void Socket::WriteData() { // IO is pending. if (io_buffer_write_.get()) return; WriteRequest& request = write_queue_.front(); DCHECK(request.byte_count >= request.bytes_written); io_buffer_write_ = new net::WrappedIOBuffer(request.io_buffer->data() + request.bytes_written); int result = WriteImpl( io_buffer_write_.get(), request.byte_count - request.bytes_written, base::Bind(&Socket::OnWriteComplete, base::Unretained(this))); if (result != net::ERR_IO_PENDING) OnWriteComplete(result); } void Socket::OnWriteComplete(int result) { io_buffer_write_ = NULL; WriteRequest& request = write_queue_.front(); if (result >= 0) { request.bytes_written += result; if (request.bytes_written < request.byte_count) { WriteData(); return; } DCHECK(request.bytes_written == request.byte_count); result = request.bytes_written; } request.callback.Run(result); write_queue_.pop(); if (!write_queue_.empty()) WriteData(); } bool Socket::SetKeepAlive(bool enable, int delay) { return false; } bool Socket::SetNoDelay(bool no_delay) { return false; } int Socket::Listen(const std::string& address, uint16_t port, int backlog, std::string* error_msg) { *error_msg = kSocketTypeNotSupported; return net::ERR_FAILED; } void Socket::Accept(const AcceptCompletionCallback& callback) { callback.Run(net::ERR_FAILED, NULL); } // static bool Socket::StringAndPortToIPEndPoint(const std::string& ip_address_str, uint16_t port, net::IPEndPoint* ip_end_point) { DCHECK(ip_end_point); net::IPAddress ip_address; if (!ip_address.AssignFromIPLiteral(ip_address_str)) return false; *ip_end_point = net::IPEndPoint(ip_address, port); return true; } void Socket::IPEndPointToStringAndPort(const net::IPEndPoint& address, std::string* ip_address_str, uint16_t* port) { DCHECK(ip_address_str); DCHECK(port); *ip_address_str = address.ToStringWithoutPort(); if (ip_address_str->empty()) { *port = 0; } else { *port = address.port(); } } Socket::WriteRequest::WriteRequest(scoped_refptr<net::IOBuffer> io_buffer, int byte_count, const CompletionCallback& callback) : io_buffer(io_buffer), byte_count(byte_count), callback(callback), bytes_written(0) {} Socket::WriteRequest::WriteRequest(const 
WriteRequest& other) = default; Socket::WriteRequest::~WriteRequest() {} // static net::NetworkTrafficAnnotationTag Socket::GetNetworkTrafficAnnotationTag() { return net::DefineNetworkTrafficAnnotation("chrome_apps_socket_api", R"( semantics { sender: "Chrome Apps Socket API" description: "Chrome Apps can use this API to send and receive data over " "the network using TCP and UDP connections." trigger: "A request from a Chrome App." data: "Any data that the app sends." destination: OTHER destination_other: "Data can be sent to any destination included in the app manifest." } policy { cookies_allowed: NO setting: "No settings control. Chrome Connectivity Diagnostics component " "uses this API. Other than that, this request will not be sent if " "the user does not install a Chrome App that uses the Socket API." chrome_policy { ExtensionInstallBlacklist { ExtensionInstallBlacklist: { entries: '*' } } } })"); } } // namespace extensions
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 22,960

file_name: 58,693
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
unique_id: 58,693
project: Chrome
target: 0
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "base/feature_list.h" #include "base/strings/stringprintf.h" #include "chrome/browser/extensions/extension_apitest.h" #include "chrome/browser/ui/browser.h" #include "chrome/browser/ui/tabs/tab_strip_model.h" #include "chrome/test/base/ui_test_utils.h" #include "content/public/browser/web_contents.h" #include "content/public/common/content_features.h" #include "content/public/test/browser_test_utils.h" #include "net/dns/mock_host_resolver.h" #include "net/test/embedded_test_server/embedded_test_server.h" #include "url/gurl.h" namespace { class ClipboardApiTest : public ExtensionApiTest { public: void SetUpOnMainThread() override { ExtensionApiTest::SetUpOnMainThread(); host_resolver()->AddRule("*", "127.0.0.1"); } bool LoadHostedApp(const std::string& app_name, const std::string& launch_page); bool ExecuteCopyInSelectedTab(); bool ExecutePasteInSelectedTab(); bool ExecuteCommandInIframeInSelectedTab(const char* command); private: bool ExecuteScriptInSelectedTab(const std::string& script); }; bool ClipboardApiTest::LoadHostedApp(const std::string& app_name, const std::string& launch_page) { if (!StartEmbeddedTestServer()) { message_ = "Failed to start test server."; return false; } if (!LoadExtension(test_data_dir_.AppendASCII("clipboard") .AppendASCII(app_name))) { message_ = "Failed to load hosted app."; return false; } GURL base_url = embedded_test_server()->GetURL( "/extensions/api_test/clipboard/"); GURL::Replacements replace_host; replace_host.SetHostStr("localhost"); base_url = base_url.ReplaceComponents(replace_host); std::string launch_page_path = base::StringPrintf("%s/%s", app_name.c_str(), launch_page.c_str()); ui_test_utils::NavigateToURL(browser(), base_url.Resolve(launch_page_path)); return true; } bool ClipboardApiTest::ExecuteCopyInSelectedTab() { const char kScript[] = "window.domAutomationController.send(document.execCommand('copy'))"; return ExecuteScriptInSelectedTab(kScript); } bool ClipboardApiTest::ExecutePasteInSelectedTab() { const char kScript[] = "window.domAutomationController.send(document.execCommand('paste'))"; return ExecuteScriptInSelectedTab(kScript); } bool ClipboardApiTest::ExecuteCommandInIframeInSelectedTab( const char* command) { const char kScript[] = "var ifr = document.createElement('iframe');\n" "document.body.appendChild(ifr);\n" "ifr.contentDocument.write('<script>parent.domAutomationController.send(" "document.execCommand(\"%s\"))</script>');"; return ExecuteScriptInSelectedTab(base::StringPrintf(kScript, command)); } bool ClipboardApiTest::ExecuteScriptInSelectedTab(const std::string& script) { bool result; CHECK(content::ExecuteScriptAndExtractBool( browser()->tab_strip_model()->GetActiveWebContents(), script, &result)); return result; } } // namespace IN_PROC_BROWSER_TEST_F(ClipboardApiTest, Extension) { ASSERT_TRUE(StartEmbeddedTestServer()); ASSERT_TRUE(RunExtensionTest("clipboard/extension")) << message_; } IN_PROC_BROWSER_TEST_F(ClipboardApiTest, ExtensionNoPermission) { ASSERT_TRUE(StartEmbeddedTestServer()); ASSERT_TRUE(RunExtensionTest("clipboard/extension_no_permission")) << message_; } IN_PROC_BROWSER_TEST_F(ClipboardApiTest, HostedApp) { ASSERT_TRUE(LoadHostedApp("hosted_app", "main.html")) << message_; EXPECT_TRUE(ExecuteCopyInSelectedTab()) << message_; EXPECT_TRUE(ExecutePasteInSelectedTab()) << message_; 
EXPECT_TRUE(ExecuteCommandInIframeInSelectedTab("copy")) << message_; EXPECT_TRUE(ExecuteCommandInIframeInSelectedTab("paste")) << message_; } IN_PROC_BROWSER_TEST_F(ClipboardApiTest, HostedAppNoPermission) { ASSERT_TRUE(LoadHostedApp("hosted_app_no_permission", "main.html")) << message_; // TODO(dcheng): The test coverage here is incomplete. The content test utils // for executing script force a user gesture, so it's impossible to test // the no user gesture case without a lot of code duplication. EXPECT_TRUE(ExecuteCopyInSelectedTab()) << message_; EXPECT_FALSE(ExecutePasteInSelectedTab()) << message_; if (!base::FeatureList::IsEnabled(features::kUserActivationV2)) { EXPECT_TRUE(ExecuteCommandInIframeInSelectedTab("copy")) << message_; } else { // In UserActivationV2, activation doesn't propagate to a child frame. EXPECT_FALSE(ExecuteCommandInIframeInSelectedTab("copy")) << message_; } EXPECT_FALSE(ExecuteCommandInIframeInSelectedTab("paste")) << message_; }
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 55,556

file_name: 47,440
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
unique_id: 47,440
project: Chrome
target: 0
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "cc/layers/painted_scrollbar_layer_impl.h" #include <stddef.h> #include "cc/resources/ui_resource_bitmap.h" #include "cc/test/layer_test_common.h" #include "components/viz/common/quads/draw_quad.h" #include "components/viz/common/quads/texture_draw_quad.h" #include "testing/gtest/include/gtest/gtest.h" namespace cc { namespace { TEST(PaintedScrollbarLayerImplTest, Occlusion) { gfx::Size layer_size(10, 1000); float scale = 2.f; gfx::Size scaled_layer_size(20, 2000); gfx::Size viewport_size(1000, 1000); float thumb_opacity = 0.2f; LayerTestCommon::LayerImplTest impl; SkBitmap thumb_sk_bitmap; thumb_sk_bitmap.allocN32Pixels(10, 10); thumb_sk_bitmap.setImmutable(); UIResourceId thumb_uid = 5; UIResourceBitmap thumb_bitmap(thumb_sk_bitmap); impl.host_impl()->CreateUIResource(thumb_uid, thumb_bitmap); SkBitmap track_sk_bitmap; track_sk_bitmap.allocN32Pixels(10, 10); track_sk_bitmap.setImmutable(); UIResourceId track_uid = 6; UIResourceBitmap track_bitmap(track_sk_bitmap); impl.host_impl()->CreateUIResource(track_uid, track_bitmap); ScrollbarOrientation orientation = VERTICAL; PaintedScrollbarLayerImpl* scrollbar_layer_impl = impl.AddChildToRoot<PaintedScrollbarLayerImpl>(orientation, false, false); scrollbar_layer_impl->SetBounds(layer_size); scrollbar_layer_impl->SetContentsOpaque(true); scrollbar_layer_impl->set_internal_contents_scale_and_bounds( scale, scaled_layer_size); scrollbar_layer_impl->SetDrawsContent(true); scrollbar_layer_impl->SetThumbThickness(layer_size.width()); scrollbar_layer_impl->SetThumbLength(500); scrollbar_layer_impl->SetTrackLength(layer_size.height()); scrollbar_layer_impl->SetCurrentPos(100.f / 4); scrollbar_layer_impl->SetClipLayerLength(100.f); scrollbar_layer_impl->SetScrollLayerLength(200.f); scrollbar_layer_impl->set_track_ui_resource_id(track_uid); scrollbar_layer_impl->set_thumb_ui_resource_id(thumb_uid); scrollbar_layer_impl->set_thumb_opacity(thumb_opacity); impl.CalcDrawProps(viewport_size); gfx::Rect thumb_rect = scrollbar_layer_impl->ComputeThumbQuadRect(); EXPECT_EQ(gfx::Rect(0, 500 / 4, 10, layer_size.height() / 2).ToString(), thumb_rect.ToString()); { SCOPED_TRACE("No occlusion"); gfx::Rect occluded; impl.AppendQuadsWithOcclusion(scrollbar_layer_impl, occluded); size_t partially_occluded_count = 0; LayerTestCommon::VerifyQuadsAreOccluded( impl.quad_list(), occluded, &partially_occluded_count); EXPECT_EQ(2u, impl.quad_list().size()); EXPECT_EQ(0u, partially_occluded_count); // Note: this is also testing that the thumb and track are both // scaled by the internal contents scale. It's not occlusion-related // but is easy to verify here. 
const viz::DrawQuad* thumb_draw_quad = impl.quad_list().ElementAt(0); const viz::DrawQuad* track_draw_quad = impl.quad_list().ElementAt(1); EXPECT_EQ(viz::DrawQuad::TEXTURE_CONTENT, thumb_draw_quad->material); EXPECT_EQ(viz::DrawQuad::TEXTURE_CONTENT, track_draw_quad->material); const viz::TextureDrawQuad* thumb_quad = viz::TextureDrawQuad::MaterialCast(thumb_draw_quad); const viz::TextureDrawQuad* track_quad = viz::TextureDrawQuad::MaterialCast(track_draw_quad); gfx::Rect scaled_thumb_rect = gfx::ScaleToEnclosingRect(thumb_rect, scale); EXPECT_EQ(track_quad->rect.ToString(), gfx::Rect(scaled_layer_size).ToString()); EXPECT_EQ(scrollbar_layer_impl->contents_opaque(), track_quad->shared_quad_state->are_contents_opaque); EXPECT_EQ(track_quad->visible_rect.ToString(), gfx::Rect(scaled_layer_size).ToString()); EXPECT_FALSE(track_quad->needs_blending); EXPECT_EQ(thumb_quad->rect.ToString(), scaled_thumb_rect.ToString()); EXPECT_EQ(thumb_quad->visible_rect.ToString(), scaled_thumb_rect.ToString()); EXPECT_TRUE(thumb_quad->needs_blending); EXPECT_EQ(scrollbar_layer_impl->contents_opaque(), thumb_quad->shared_quad_state->are_contents_opaque); for (size_t i = 0; i < 4; ++i) { EXPECT_EQ(thumb_opacity, thumb_quad->vertex_opacity[i]); EXPECT_EQ(1.f, track_quad->vertex_opacity[i]); } } { SCOPED_TRACE("Full occlusion"); gfx::Rect occluded(scrollbar_layer_impl->visible_layer_rect()); impl.AppendQuadsWithOcclusion(scrollbar_layer_impl, occluded); LayerTestCommon::VerifyQuadsExactlyCoverRect(impl.quad_list(), gfx::Rect()); EXPECT_EQ(impl.quad_list().size(), 0u); } { SCOPED_TRACE("Partial occlusion"); gfx::Rect occluded(0, 0, 5, 1000); impl.AppendQuadsWithOcclusion(scrollbar_layer_impl, occluded); size_t partially_occluded_count = 0; LayerTestCommon::VerifyQuadsAreOccluded( impl.quad_list(), occluded, &partially_occluded_count); // The layer outputs two quads, which is partially occluded. EXPECT_EQ(2u, impl.quad_list().size()); EXPECT_EQ(2u, partially_occluded_count); } } } // namespace } // namespace cc
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 44,303

file_name: 45,460
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
unique_id: 45,460
project: Chrome
target: 0
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef ASH_SHELF_OVERFLOW_BUTTON_H_ #define ASH_SHELF_OVERFLOW_BUTTON_H_ #include "ash/ash_export.h" #include "base/macros.h" #include "ui/gfx/geometry/rect.h" #include "ui/gfx/image/image_skia.h" #include "ui/views/controls/button/button.h" namespace ash { class Shelf; class ShelfView; // Shelf overflow chevron button. class ASH_EXPORT OverflowButton : public views::Button { public: // |shelf_view| is the view containing this button. OverflowButton(ShelfView* shelf_view, Shelf* shelf); ~OverflowButton() override; void OnShelfAlignmentChanged(); void OnOverflowBubbleShown(); void OnOverflowBubbleHidden(); // Updates background and schedules a paint. void UpdateShelfItemBackground(SkColor color); private: friend class OverflowButtonTestApi; enum class ChevronDirection { UP, DOWN, LEFT, RIGHT }; // Returns the direction of chevron image based on the shelf alignment and // overflow state. ChevronDirection GetChevronDirection() const; // Updates the chevron image according to GetChevronDirection(). void UpdateChevronImage(); // views::Button: std::unique_ptr<views::InkDrop> CreateInkDrop() override; std::unique_ptr<views::InkDropRipple> CreateInkDropRipple() const override; bool ShouldEnterPushedState(const ui::Event& event) override; void NotifyClick(const ui::Event& event) override; std::unique_ptr<views::InkDropMask> CreateInkDropMask() const override; void PaintButtonContents(gfx::Canvas* canvas) override; // Helper functions to paint the background and foreground of the button // at |bounds|. void PaintBackground(gfx::Canvas* canvas, const gfx::Rect& bounds); void PaintForeground(gfx::Canvas* canvas, const gfx::Rect& bounds); // Calculates the bounds of the overflow button based on the shelf alignment // and overflow shelf visibility. gfx::Rect CalculateButtonBounds() const; // The original upward chevron image. const gfx::ImageSkia upward_image_; // Cached rotations of |upward_image_|. gfx::ImageSkia downward_image_; gfx::ImageSkia leftward_image_; gfx::ImageSkia rightward_image_; // Current chevron image which is a pointer to one of the above images // according to current shelf alignment and overflow shelf visibility. const gfx::ImageSkia* chevron_image_; ShelfView* shelf_view_; Shelf* shelf_; // Color used to paint the background. SkColor background_color_; DISALLOW_COPY_AND_ASSIGN(OverflowButton); }; } // namespace ash #endif // ASH_SHELF_OVERFLOW_BUTTON_H_
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 42,323