file_name (int64: 0–72.3k) | vulnerable_line_numbers (string: 1–1.06k chars, nullable) | dataset_type (string: 1 class) | commit_hash (string: 40–44 chars) | unique_id (int64: 0–271k) | project (string: 10 classes) | target (int64: 0–1) | repo_url (string: 10 classes) | date (string: 25 chars, nullable) | code (string: 0–20.4M chars) | CVE (string: 13–43 chars, nullable) | CWE (string: 50 classes) | commit_link (string: 73–97 chars, nullable) | severity (string: 4 classes) | __index_level_0__ (int64: 0–124k)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
20,755 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 185,750 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/*
* Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@emulex.com
*
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*/
#ifndef BE_H
#define BE_H
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/u64_stats_sync.h>
#include <linux/cpumask.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include "be_hw.h"
#include "be_roce.h"
#define DRV_VER "11.1.0.0"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
#define OC_NAME "Emulex OneConnect"
#define OC_NAME_BE OC_NAME "(be3)"
#define OC_NAME_LANCER OC_NAME "(Lancer)"
#define OC_NAME_SH OC_NAME "(Skyhawk)"
#define DRV_DESC "Emulex OneConnect NIC Driver"
#define BE_VENDOR_ID 0x19a2
#define EMULEX_VENDOR_ID 0x10df
#define BE_DEVICE_ID1 0x211
#define BE_DEVICE_ID2 0x221
#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */
#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */
#define OC_DEVICE_ID5 0x720 /* Device Id for Skyhawk cards */
#define OC_DEVICE_ID6 0x728 /* Device id for VF in SkyHawk */
#define OC_SUBSYS_DEVICE_ID1 0xE602
#define OC_SUBSYS_DEVICE_ID2 0xE642
#define OC_SUBSYS_DEVICE_ID3 0xE612
#define OC_SUBSYS_DEVICE_ID4 0xE652
/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN ((u16) 64)
/* allocate extra space to allow tunneling decapsulation without head reallocation */
#define BE_RX_SKB_ALLOC_SIZE 256
#define BE_MAX_JUMBO_FRAME_SIZE 9018
#define BE_MIN_MTU 256
#define BE_MAX_MTU (BE_MAX_JUMBO_FRAME_SIZE - \
(ETH_HLEN + ETH_FCS_LEN))
/* Accommodate for QnQ configurations where VLAN insertion is enabled in HW */
#define BE_MAX_GSO_SIZE (65535 - 2 * VLAN_HLEN)
#define BE_NUM_VLANS_SUPPORTED 64
#define BE_MAX_EQD 128u
#define BE_MAX_TX_FRAG_COUNT 30
#define EVNT_Q_LEN 1024
#define TX_Q_LEN 2048
#define TX_CQ_LEN 1024
#define RX_Q_LEN 1024 /* Does not support any other value */
#define RX_CQ_LEN 1024
#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
#define MCC_CQ_LEN 256
#define BE2_MAX_RSS_QS 4
#define BE3_MAX_RSS_QS 16
#define BE3_MAX_TX_QS 16
#define BE3_MAX_EVT_QS 16
#define BE3_SRIOV_MAX_EVT_QS 8
#define SH_VF_MAX_NIC_EQS 3 /* Skyhawk VFs can have a max of 4 EQs
* and at least 1 is granted to either
* SURF/DPDK
*/
#define MAX_PORT_RSS_TABLES 15
#define MAX_NIC_FUNCS 16
#define MAX_RX_QS 32
#define MAX_EVT_QS 32
#define MAX_TX_QS 32
#define MAX_ROCE_EQS 5
#define MAX_MSIX_VECTORS 32
#define MIN_MSIX_VECTORS 1
#define BE_NAPI_WEIGHT 64
#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
#define MAX_NUM_POST_ERX_DB 255u
#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN 32
#define CNTL_SERIAL_NUM_WORDS 8 /* Controller serial number words */
#define CNTL_SERIAL_NUM_WORD_SZ (sizeof(u16)) /* Byte-sz of serial num word */
#define RSS_INDIR_TABLE_LEN 128
#define RSS_HASH_KEY_LEN 40
#define BE_UNKNOWN_PHY_STATE 0xFF
struct be_dma_mem {
void *va;
dma_addr_t dma;
u32 size;
};
struct be_queue_info {
u32 len;
u32 entry_size; /* Size of an element in the queue */
u32 tail, head;
atomic_t used; /* Number of valid elements in the queue */
u32 id;
struct be_dma_mem dma_mem;
bool created;
};
static inline u32 MODULO(u32 val, u32 limit)
{
BUG_ON(limit & (limit - 1));
return val & (limit - 1);
}
static inline void index_adv(u32 *index, u32 val, u32 limit)
{
*index = MODULO((*index + val), limit);
}
static inline void index_inc(u32 *index, u32 limit)
{
*index = MODULO((*index + 1), limit);
}
static inline void *queue_head_node(struct be_queue_info *q)
{
return q->dma_mem.va + q->head * q->entry_size;
}
static inline void *queue_tail_node(struct be_queue_info *q)
{
return q->dma_mem.va + q->tail * q->entry_size;
}
static inline void *queue_index_node(struct be_queue_info *q, u16 index)
{
return q->dma_mem.va + index * q->entry_size;
}
static inline void queue_head_inc(struct be_queue_info *q)
{
index_inc(&q->head, q->len);
}
static inline void index_dec(u32 *index, u32 limit)
{
*index = MODULO((*index - 1), limit);
}
static inline void queue_tail_inc(struct be_queue_info *q)
{
index_inc(&q->tail, q->len);
}
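/* Editorial sketch (not part of the original header), showing how the ring
 * helpers above are typically combined by a producer: fill the slot at
 * 'head', then advance 'head' modulo the power-of-two queue length that
 * MODULO() requires.  The function name is hypothetical.
 */
static inline void be_queue_produce_sketch(struct be_queue_info *q,
					   const void *entry)
{
	memcpy(queue_head_node(q), entry, q->entry_size); /* fill current slot */
	queue_head_inc(q);		/* head = (head + 1) % len */
	atomic_inc(&q->used);		/* one more valid element */
}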
struct be_eq_obj {
struct be_queue_info q;
char desc[32];
/* Adaptive interrupt coalescing (AIC) info */
bool enable_aic;
u32 min_eqd; /* in usecs */
u32 max_eqd; /* in usecs */
u32 eqd; /* configured val when aic is off */
u32 cur_eqd; /* in usecs */
u8 idx; /* array index */
u8 msix_idx;
u16 spurious_intr;
struct napi_struct napi;
struct be_adapter *adapter;
cpumask_var_t affinity_mask;
#ifdef CONFIG_NET_RX_BUSY_POLL
#define BE_EQ_IDLE 0
#define BE_EQ_NAPI 1 /* napi owns this EQ */
#define BE_EQ_POLL 2 /* poll owns this EQ */
#define BE_EQ_LOCKED (BE_EQ_NAPI | BE_EQ_POLL)
#define BE_EQ_NAPI_YIELD 4 /* napi yielded this EQ */
#define BE_EQ_POLL_YIELD 8 /* poll yielded this EQ */
#define BE_EQ_YIELD (BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD)
#define BE_EQ_USER_PEND (BE_EQ_POLL | BE_EQ_POLL_YIELD)
unsigned int state;
spinlock_t lock; /* lock to serialize napi and busy-poll */
#endif /* CONFIG_NET_RX_BUSY_POLL */
} ____cacheline_aligned_in_smp;
struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
bool enable;
u32 min_eqd; /* in usecs */
u32 max_eqd; /* in usecs */
u32 prev_eqd; /* in usecs */
u32 et_eqd; /* configured val when aic is off */
ulong jiffies;
u64 rx_pkts_prev; /* Used to calculate RX pps */
u64 tx_reqs_prev; /* Used to calculate TX pps */
};
struct be_mcc_obj {
struct be_queue_info q;
struct be_queue_info cq;
bool rearm_cq;
};
struct be_tx_stats {
u64 tx_bytes;
u64 tx_pkts;
u64 tx_vxlan_offload_pkts;
u64 tx_reqs;
u64 tx_compl;
ulong tx_jiffies;
u32 tx_stops;
u32 tx_drv_drops; /* pkts dropped by driver */
/* the error counters are described in be_ethtool.c */
u32 tx_hdr_parse_err;
u32 tx_dma_err;
u32 tx_tso_err;
u32 tx_spoof_check_err;
u32 tx_qinq_err;
u32 tx_internal_parity_err;
struct u64_stats_sync sync;
struct u64_stats_sync sync_compl;
};
/* Structure to hold some data of interest obtained from a TX CQE */
struct be_tx_compl_info {
u8 status; /* Completion status */
u16 end_index; /* Completed TXQ Index */
};
struct be_tx_obj {
u32 db_offset;
struct be_queue_info q;
struct be_queue_info cq;
struct be_tx_compl_info txcp;
/* Remember the skbs that were transmitted */
struct sk_buff *sent_skb_list[TX_Q_LEN];
struct be_tx_stats stats;
u16 pend_wrb_cnt; /* Number of WRBs yet to be given to HW */
u16 last_req_wrb_cnt; /* wrb cnt of the last req in the Q */
u16 last_req_hdr; /* index of the last req's hdr-wrb */
} ____cacheline_aligned_in_smp;
/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
struct page *page;
/* set to page-addr for last frag of the page & frag-addr otherwise */
DEFINE_DMA_UNMAP_ADDR(bus);
u16 page_offset;
bool last_frag; /* last frag of the page */
};
struct be_rx_stats {
u64 rx_bytes;
u64 rx_pkts;
u64 rx_vxlan_offload_pkts;
u32 rx_drops_no_skbs; /* skb allocation errors */
u32 rx_drops_no_frags; /* HW has no fetched frags */
u32 rx_post_fail; /* page post alloc failures */
u32 rx_compl;
u32 rx_mcast_pkts;
u32 rx_compl_err; /* completions with err set */
struct u64_stats_sync sync;
};
struct be_rx_compl_info {
u32 rss_hash;
u16 vlan_tag;
u16 pkt_size;
u16 port;
u8 vlanf;
u8 num_rcvd;
u8 err;
u8 ipf;
u8 tcpf;
u8 udpf;
u8 ip_csum;
u8 l4_csum;
u8 ipv6;
u8 qnq;
u8 pkt_type;
u8 ip_frag;
u8 tunneled;
};
struct be_rx_obj {
struct be_adapter *adapter;
struct be_queue_info q;
struct be_queue_info cq;
struct be_rx_compl_info rxcp;
struct be_rx_page_info page_info_tbl[RX_Q_LEN];
struct be_rx_stats stats;
u8 rss_id;
bool rx_post_starved; /* Zero rx frags have been posted to BE */
} ____cacheline_aligned_in_smp;
struct be_drv_stats {
u32 eth_red_drops;
u32 dma_map_errors;
u32 rx_drops_no_pbuf;
u32 rx_drops_no_txpb;
u32 rx_drops_no_erx_descr;
u32 rx_drops_no_tpre_descr;
u32 rx_drops_too_many_frags;
u32 forwarded_packets;
u32 rx_drops_mtu;
u32 rx_crc_errors;
u32 rx_alignment_symbol_errors;
u32 rx_pause_frames;
u32 rx_priority_pause_frames;
u32 rx_control_frames;
u32 rx_in_range_errors;
u32 rx_out_range_errors;
u32 rx_frame_too_long;
u32 rx_address_filtered;
u32 rx_dropped_too_small;
u32 rx_dropped_too_short;
u32 rx_dropped_header_too_small;
u32 rx_dropped_tcp_length;
u32 rx_dropped_runt;
u32 rx_ip_checksum_errs;
u32 rx_tcp_checksum_errs;
u32 rx_udp_checksum_errs;
u32 tx_pauseframes;
u32 tx_priority_pauseframes;
u32 tx_controlframes;
u32 rxpp_fifo_overflow_drop;
u32 rx_input_fifo_overflow_drop;
u32 pmem_fifo_overflow_drop;
u32 jabber_events;
u32 rx_roce_bytes_lsd;
u32 rx_roce_bytes_msd;
u32 rx_roce_frames;
u32 roce_drops_payload_len;
u32 roce_drops_crc;
};
/* A vlan-id of 0xFFFF must be used to clear transparent vlan-tagging */
#define BE_RESET_VLAN_TAG_ID 0xFFFF
struct be_vf_cfg {
unsigned char mac_addr[ETH_ALEN];
int if_handle;
int pmac_id;
u16 vlan_tag;
u32 tx_rate;
u32 plink_tracking;
u32 privileges;
bool spoofchk;
};
enum vf_state {
ENABLED = 0,
ASSIGNED = 1
};
#define BE_FLAGS_LINK_STATUS_INIT BIT(1)
#define BE_FLAGS_SRIOV_ENABLED BIT(2)
#define BE_FLAGS_WORKER_SCHEDULED BIT(3)
#define BE_FLAGS_NAPI_ENABLED BIT(6)
#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD BIT(7)
#define BE_FLAGS_VXLAN_OFFLOADS BIT(8)
#define BE_FLAGS_SETUP_DONE BIT(9)
#define BE_FLAGS_PHY_MISCONFIGURED BIT(10)
#define BE_FLAGS_ERR_DETECTION_SCHEDULED BIT(11)
#define BE_FLAGS_OS2BMC BIT(12)
#define BE_FLAGS_TRY_RECOVERY BIT(13)
#define BE_UC_PMAC_COUNT 30
#define BE_VF_UC_PMAC_COUNT 2
#define MAX_ERR_RECOVERY_RETRY_COUNT 3
#define ERR_DETECTION_DELAY 1000
/* Ethtool set_dump flags */
#define LANCER_INITIATE_FW_DUMP 0x1
#define LANCER_DELETE_FW_DUMP 0x2
struct phy_info {
/* From SFF-8472 spec */
#define SFP_VENDOR_NAME_LEN 17
u8 transceiver;
u8 autoneg;
u8 fc_autoneg;
u8 port_type;
u16 phy_type;
u16 interface_type;
u32 misc_params;
u16 auto_speeds_supported;
u16 fixed_speeds_supported;
int link_speed;
u32 advertising;
u32 supported;
u8 cable_type;
u8 vendor_name[SFP_VENDOR_NAME_LEN];
u8 vendor_pn[SFP_VENDOR_NAME_LEN];
};
struct be_resources {
u16 max_vfs; /* Total VFs "really" supported by FW/HW */
u16 max_mcast_mac;
u16 max_tx_qs;
u16 max_rss_qs;
u16 max_rx_qs;
u16 max_cq_count;
u16 max_uc_mac; /* Max UC MACs programmable */
u16 max_vlans; /* Number of vlans supported */
u16 max_iface_count;
u16 max_mcc_count;
u16 max_evt_qs;
u16 max_nic_evt_qs; /* NIC's share of evt qs */
u32 if_cap_flags;
u32 vf_if_cap_flags; /* VF if capability flags */
u32 flags;
/* Calculated PF Pool's share of RSS Tables. This is not enforced by
* the FW, but is a self-imposed driver limitation.
*/
u16 max_rss_tables;
};
/* These are port-wide values */
struct be_port_resources {
u16 max_vfs;
u16 nic_pfs;
};
#define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC)
struct rss_info {
u64 rss_flags;
u8 rsstable[RSS_INDIR_TABLE_LEN];
u8 rss_queue[RSS_INDIR_TABLE_LEN];
u8 rss_hkey[RSS_HASH_KEY_LEN];
};
#define BE_INVALID_DIE_TEMP 0xFF
struct be_hwmon {
struct device *hwmon_dev;
u8 be_on_die_temp; /* Unit: millidegree Celsius */
};
/* Macros to read/write the 'features' word of be_wrb_params structure.
*/
#define BE_WRB_F_BIT(name) BE_WRB_F_##name##_BIT
#define BE_WRB_F_MASK(name) BIT_MASK(BE_WRB_F_##name##_BIT)
#define BE_WRB_F_GET(word, name) \
(((word) & (BE_WRB_F_MASK(name))) >> BE_WRB_F_BIT(name))
#define BE_WRB_F_SET(word, name, val) \
((word) |= (((val) << BE_WRB_F_BIT(name)) & BE_WRB_F_MASK(name)))
/* Feature/offload bits */
enum {
BE_WRB_F_CRC_BIT, /* Ethernet CRC */
BE_WRB_F_IPCS_BIT, /* IP csum */
BE_WRB_F_TCPCS_BIT, /* TCP csum */
BE_WRB_F_UDPCS_BIT, /* UDP csum */
BE_WRB_F_LSO_BIT, /* LSO */
BE_WRB_F_LSO6_BIT, /* LSO6 */
BE_WRB_F_VLAN_BIT, /* VLAN */
BE_WRB_F_VLAN_SKIP_HW_BIT, /* Skip VLAN tag (workaround) */
BE_WRB_F_OS2BMC_BIT /* Send packet to the management ring */
};
/* The structure below provides a HW-agnostic abstraction of WRB params
* retrieved from a TX skb. This is in turn passed to chip specific routines
* during transmit, to set the corresponding params in the WRB.
*/
struct be_wrb_params {
u32 features; /* Feature bits */
u16 vlan_tag; /* VLAN tag */
u16 lso_mss; /* MSS for LSO */
};
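/* Editorial sketch (not part of the original header): the BE_WRB_F_*()
 * accessors above operate on the 'features' word, e.g. requesting VLAN
 * insertion and reading the flag back.  The function name is hypothetical.
 */
static inline u32 be_wrb_vlan_flag_sketch(struct be_wrb_params *wrb_params)
{
	BE_WRB_F_SET(wrb_params->features, VLAN, 1);	/* sets BE_WRB_F_VLAN_BIT */
	return BE_WRB_F_GET(wrb_params->features, VLAN);/* reads back 1 */
}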
struct be_eth_addr {
unsigned char mac[ETH_ALEN];
};
#define BE_SEC 1000 /* in msec */
#define BE_MIN (60 * BE_SEC) /* in msec */
#define BE_HOUR (60 * BE_MIN) /* in msec */
#define ERR_RECOVERY_MAX_RETRY_COUNT 3
#define ERR_RECOVERY_DETECTION_DELAY BE_SEC
#define ERR_RECOVERY_RETRY_DELAY (30 * BE_SEC)
/* UE-detection-duration in BEx/Skyhawk:
* All PFs must wait for this duration after they detect UE before reading
* SLIPORT_SEMAPHORE register. At the end of this duration, the Firmware
* guarantees that the SLIPORT_SEMAPHORE register is updated to indicate
* if the UE is recoverable.
*/
#define ERR_RECOVERY_UE_DETECT_DURATION BE_SEC
/* Initial idle time (in msec) to elapse after driver load,
* before UE recovery is allowed.
*/
#define ERR_IDLE_HR 24
#define ERR_RECOVERY_IDLE_TIME (ERR_IDLE_HR * BE_HOUR)
/* Time interval (in msec) after which UE recovery can be repeated */
#define ERR_INTERVAL_HR 72
#define ERR_RECOVERY_INTERVAL (ERR_INTERVAL_HR * BE_HOUR)
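/* Editorial worked values (not part of the original header): BE_HOUR is
 * 60 * 60 * 1000 = 3,600,000 msec, so ERR_RECOVERY_IDLE_TIME is
 * 24 * 3,600,000 = 86,400,000 msec (24 hours) and ERR_RECOVERY_INTERVAL is
 * 72 * 3,600,000 = 259,200,000 msec (72 hours).
 */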
/* BEx/SH UE recovery state machine */
enum {
ERR_RECOVERY_ST_NONE = 0, /* No Recovery */
ERR_RECOVERY_ST_DETECT = 1, /* UE detection duration */
ERR_RECOVERY_ST_RESET = 2, /* Reset Phase (PF0 only) */
ERR_RECOVERY_ST_PRE_POLL = 3, /* Pre-Poll Phase (all PFs) */
ERR_RECOVERY_ST_REINIT = 4 /* Re-initialize Phase */
};
struct be_error_recovery {
/* Lancer error recovery variables */
u8 recovery_retries;
/* BEx/Skyhawk error recovery variables */
u8 recovery_state;
u16 ue_to_reset_time; /* Time after UE, to soft reset
* the chip - PF0 only
*/
u16 ue_to_poll_time; /* Time after UE, to Restart Polling
* of SLIPORT_SEMAPHORE reg
*/
u16 last_err_code;
bool recovery_supported;
unsigned long probe_time;
unsigned long last_recovery_time;
/* Common to both Lancer & BEx/SH error recovery */
u32 resched_delay;
struct delayed_work err_detection_work;
};
/* Ethtool priv_flags */
#define BE_DISABLE_TPE_RECOVERY 0x1
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
u8 __iomem *db; /* Door Bell */
u8 __iomem *pcicfg; /* On SH,BEx only. Shadow of PCI config space */
struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
struct be_dma_mem mbox_mem;
/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
* is stored for freeing purpose */
struct be_dma_mem mbox_mem_alloced;
struct be_mcc_obj mcc_obj;
struct mutex mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
u16 cfg_num_rx_irqs; /* configured via set-channels */
u16 cfg_num_tx_irqs; /* configured via set-channels */
u16 num_evt_qs;
u16 num_msix_vec;
struct be_eq_obj eq_obj[MAX_EVT_QS];
struct msix_entry msix_entries[MAX_MSIX_VECTORS];
bool isr_registered;
/* TX Rings */
u16 num_tx_qs;
struct be_tx_obj tx_obj[MAX_TX_QS];
/* Rx rings */
u16 num_rx_qs;
u16 num_rss_qs;
u16 need_def_rxq;
struct be_rx_obj rx_obj[MAX_RX_QS];
u32 big_page_size; /* Compounded page size shared by rx wrbs */
struct be_drv_stats drv_stats;
struct be_aic_obj aic_obj[MAX_EVT_QS];
u8 vlan_prio_bmap; /* Available Priority BitMap */
u16 recommended_prio_bits;/* Recommended Priority bits in vlan tag */
struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
struct be_dma_mem stats_cmd;
/* Work queue used to perform periodic tasks like getting statistics */
struct delayed_work work;
u16 work_counter;
u8 recovery_retries;
u8 err_flags;
bool pcicfg_mapped; /* pcicfg obtained via pci_iomap() */
u32 flags;
u32 cmd_privileges;
/* Ethtool knobs and info */
char fw_ver[FW_VER_LEN];
char fw_on_flash[FW_VER_LEN];
/* IFACE filtering fields */
int if_handle; /* Used to configure filtering */
u32 if_flags; /* Interface filtering flags */
u32 *pmac_id; /* MAC addr handle used by BE card */
struct be_eth_addr *uc_list;/* list of uc-addrs programmed (not perm) */
u32 uc_macs; /* Count of secondary UC MAC programmed */
struct be_eth_addr *mc_list;/* list of mcast addrs programmed */
u32 mc_count;
unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
u16 vlans_added;
bool update_uc_list;
bool update_mc_list;
struct mutex rx_filter_lock;/* For protecting vids[] & mc/uc_list[] */
u32 beacon_state; /* for set_phys_id */
u32 port_num;
char port_name;
u8 mc_type;
u32 function_mode;
u32 function_caps;
u32 rx_fc; /* Rx flow control */
u32 tx_fc; /* Tx flow control */
bool stats_cmd_sent;
struct {
u32 size;
u32 total_size;
u64 io_addr;
} roce_db;
u32 num_msix_roce_vec;
struct ocrdma_dev *ocrdma_dev;
struct list_head entry;
u32 flash_status;
struct completion et_cmd_compl;
struct be_resources pool_res; /* resources available for the port */
struct be_resources res; /* resources available for the func */
u16 num_vfs; /* Number of VFs provisioned by PF */
u8 pf_num; /* Numbering used by FW, starts at 0 */
u8 vf_num; /* Numbering used by FW, starts at 1 */
u8 virtfn;
struct be_vf_cfg *vf_cfg;
bool be3_native;
u32 sli_family;
u8 hba_port_num;
u16 pvid;
__be16 vxlan_port;
int vxlan_port_count;
int vxlan_port_aliases;
struct phy_info phy;
u8 wol_cap;
bool wol_en;
u16 asic_rev;
u16 qnq_vid;
u32 msg_enable;
int be_get_temp_freq;
struct be_hwmon hwmon_info;
struct rss_info rss_info;
/* Filters for packets that need to be sent to BMC */
u32 bmc_filt_mask;
u32 fat_dump_len;
u16 serial_num[CNTL_SERIAL_NUM_WORDS];
u8 phy_state; /* state of sfp optics (functional, faulted, etc.,) */
u8 dev_mac[ETH_ALEN];
u32 priv_flags; /* ethtool get/set_priv_flags() */
struct be_error_recovery error_recovery;
};
/* Used for deferred FW config cmds. Add fields to this struct as reqd */
struct be_cmd_work {
struct work_struct work;
struct be_adapter *adapter;
union {
__be16 vxlan_port;
} info;
};
#define be_physfn(adapter) (!adapter->virtfn)
#define be_virtfn(adapter) (adapter->virtfn)
#define sriov_enabled(adapter) (adapter->flags & \
BE_FLAGS_SRIOV_ENABLED)
#define for_all_vfs(adapter, vf_cfg, i) \
for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
i++, vf_cfg++)
#define ON 1
#define OFF 0
#define be_max_vlans(adapter) (adapter->res.max_vlans)
#define be_max_uc(adapter) (adapter->res.max_uc_mac)
#define be_max_mc(adapter) (adapter->res.max_mcast_mac)
#define be_max_vfs(adapter) (adapter->pool_res.max_vfs)
#define be_max_rss(adapter) (adapter->res.max_rss_qs)
#define be_max_txqs(adapter) (adapter->res.max_tx_qs)
#define be_max_prio_txqs(adapter) (adapter->res.max_prio_tx_qs)
#define be_max_rxqs(adapter) (adapter->res.max_rx_qs)
/* Max number of EQs available for the function (NIC + RoCE (if enabled)) */
#define be_max_func_eqs(adapter) (adapter->res.max_evt_qs)
/* Max number of EQs available only for NIC */
#define be_max_nic_eqs(adapter) (adapter->res.max_nic_evt_qs)
#define be_if_cap_flags(adapter) (adapter->res.if_cap_flags)
#define be_max_pf_pool_rss_tables(adapter) \
(adapter->pool_res.max_rss_tables)
/* Max irqs available for NIC */
#define be_max_irqs(adapter) \
(min_t(u16, be_max_nic_eqs(adapter), num_online_cpus()))
/* Max irqs *needed* for RX queues */
static inline u16 be_max_rx_irqs(struct be_adapter *adapter)
{
/* If no RSS, need at least one irq for def-RXQ */
u16 num = max_t(u16, be_max_rss(adapter), 1);
return min_t(u16, num, be_max_irqs(adapter));
}
/* Max irqs *needed* for TX queues */
static inline u16 be_max_tx_irqs(struct be_adapter *adapter)
{
return min_t(u16, be_max_txqs(adapter), be_max_irqs(adapter));
}
/* Max irqs *needed* for combined queues */
static inline u16 be_max_qp_irqs(struct be_adapter *adapter)
{
return min(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter));
}
/* Max irqs *needed* for RX and TX queues together */
static inline u16 be_max_any_irqs(struct be_adapter *adapter)
{
return max(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter));
}
/* Is BE in pvid_tagging mode */
#define be_pvid_tagging_enabled(adapter) (adapter->pvid)
/* Is BE in QNQ multi-channel mode */
#define be_is_qnq_mode(adapter) (adapter->function_mode & QNQ_MODE)
#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
adapter->pdev->device == OC_DEVICE_ID4)
#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5 || \
adapter->pdev->device == OC_DEVICE_ID6)
#define BE3_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID2 || \
adapter->pdev->device == OC_DEVICE_ID2)
#define BE2_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID1 || \
adapter->pdev->device == OC_DEVICE_ID1)
#define BEx_chip(adapter) (BE3_chip(adapter) || BE2_chip(adapter))
#define be_roce_supported(adapter) (skyhawk_chip(adapter) && \
(adapter->function_mode & RDMA_ENABLED))
extern const struct ethtool_ops be_ethtool_ops;
#define msix_enabled(adapter) (adapter->num_msix_vec > 0)
#define num_irqs(adapter) (msix_enabled(adapter) ? \
adapter->num_msix_vec : 1)
#define tx_stats(txo) (&(txo)->stats)
#define rx_stats(rxo) (&(rxo)->stats)
/* The default RXQ is the last RXQ */
#define default_rxo(adpt) (&adpt->rx_obj[adpt->num_rx_qs - 1])
#define for_all_rx_queues(adapter, rxo, i) \
for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
i++, rxo++)
#define for_all_rss_queues(adapter, rxo, i) \
for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rss_qs; \
i++, rxo++)
#define for_all_tx_queues(adapter, txo, i) \
for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \
i++, txo++)
#define for_all_evt_queues(adapter, eqo, i) \
for (i = 0, eqo = &adapter->eq_obj[i]; i < adapter->num_evt_qs; \
i++, eqo++)
#define for_all_rx_queues_on_eq(adapter, eqo, rxo, i) \
for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\
i += adapter->num_evt_qs, rxo += adapter->num_evt_qs)
#define for_all_tx_queues_on_eq(adapter, eqo, txo, i) \
for (i = eqo->idx, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs;\
i += adapter->num_evt_qs, txo += adapter->num_evt_qs)
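/* Editorial worked example (not part of the original header): with
 * adapter->num_evt_qs == 4 and adapter->num_rx_qs == 8, the EQ with
 * eqo->idx == 1 visits rx_obj[1] and rx_obj[5], i.e. RX/TX queues are
 * spread across EQs round-robin by index.
 */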
#define is_mcc_eqo(eqo) (eqo->idx == 0)
#define mcc_eqo(adapter) (&adapter->eq_obj[0])
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) \
((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
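/* Editorial worked example (not part of the original header): a 6000-byte
 * buffer starting 0x100 bytes into a 4K page gives
 * PAGES_4K_SPANNED(addr, 6000) = (0x100 + 6000 + 4095) >> 12 = 2, matching
 * the two 4K pages the data actually touches.
 */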
/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field) \
(((size_t)&(((_struct *)0)->field))%32)
/* Returns the bit mask of the field that is NOT shifted into location. */
static inline u32 amap_mask(u32 bitsize)
{
return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
}
static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
u32 *dw = (u32 *) ptr + dw_offset;
*dw &= ~(mask << offset);
*dw |= (mask & value) << offset;
}
#define AMAP_SET_BITS(_struct, field, ptr, val) \
amap_set(ptr, \
offsetof(_struct, field)/32, \
amap_mask(sizeof(((_struct *)0)->field)), \
AMAP_BIT_OFFSET(_struct, field), \
val)
static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
u32 *dw = (u32 *) ptr;
return mask & (*(dw + dw_offset) >> offset);
}
#define AMAP_GET_BITS(_struct, field, ptr) \
amap_get(ptr, \
offsetof(_struct, field)/32, \
amap_mask(sizeof(((_struct *)0)->field)), \
AMAP_BIT_OFFSET(_struct, field))
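/* Editorial sketch (not part of the original header): the amap structs in
 * be_hw.h encode each bit as one u8 array element, so sizeof(field) is the
 * width in bits and its byte offset is the bit position.  The struct, field
 * and function below are hypothetical and only illustrate the calling
 * convention of AMAP_SET_BITS()/AMAP_GET_BITS().
 */
struct amap_sketch {
	u8 rsvd0[5];		/* bits 0..4 */
	u8 enabled[1];		/* bit 5 */
	u8 rsvd1[26];		/* bits 6..31 */
};
static inline u32 amap_sketch_roundtrip(void)
{
	u32 dw[1] = { 0 };

	AMAP_SET_BITS(struct amap_sketch, enabled, dw, 1);	/* sets bit 5 of dw[0] */
	return AMAP_GET_BITS(struct amap_sketch, enabled, dw);	/* returns 1 */
}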
#define GET_RX_COMPL_V0_BITS(field, ptr) \
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, field, ptr)
#define GET_RX_COMPL_V1_BITS(field, ptr) \
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, field, ptr)
#define GET_TX_COMPL_BITS(field, ptr) \
AMAP_GET_BITS(struct amap_eth_tx_compl, field, ptr)
#define SET_TX_WRB_HDR_BITS(field, ptr, val) \
AMAP_SET_BITS(struct amap_eth_hdr_wrb, field, ptr, val)
#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
u32 *dw = wrb;
BUG_ON(len % 4);
do {
*dw = cpu_to_le32(*dw);
dw++;
len -= 4;
} while (len);
#endif /* __BIG_ENDIAN */
}
#define be_cmd_status(status) (status > 0 ? -EIO : status)
static inline u8 is_tcp_pkt(struct sk_buff *skb)
{
u8 val = 0;
if (ip_hdr(skb)->version == 4)
val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
else if (ip_hdr(skb)->version == 6)
val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);
return val;
}
static inline u8 is_udp_pkt(struct sk_buff *skb)
{
u8 val = 0;
if (ip_hdr(skb)->version == 4)
val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
else if (ip_hdr(skb)->version == 6)
val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);
return val;
}
static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}
#define be_error_recovering(adapter) \
(adapter->flags & BE_FLAGS_TRY_RECOVERY)
#define BE_ERROR_EEH 1
#define BE_ERROR_UE BIT(1)
#define BE_ERROR_FW BIT(2)
#define BE_ERROR_HW (BE_ERROR_EEH | BE_ERROR_UE)
#define BE_ERROR_ANY (BE_ERROR_EEH | BE_ERROR_UE | BE_ERROR_FW)
#define BE_CLEAR_ALL 0xFF
static inline u8 be_check_error(struct be_adapter *adapter, u32 err_type)
{
return (adapter->err_flags & err_type);
}
static inline void be_set_error(struct be_adapter *adapter, int err_type)
{
struct net_device *netdev = adapter->netdev;
adapter->err_flags |= err_type;
netif_carrier_off(netdev);
dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
}
static inline void be_clear_error(struct be_adapter *adapter, int err_type)
{
adapter->err_flags &= ~err_type;
}
static inline bool be_multi_rxq(const struct be_adapter *adapter)
{
return adapter->num_rx_qs > 1;
}
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
u16 num_popped);
void be_link_status_update(struct be_adapter *adapter, u8 link_status);
void be_parse_stats(struct be_adapter *adapter);
int be_load_fw(struct be_adapter *adapter, u8 *func);
bool be_is_wol_supported(struct be_adapter *adapter);
bool be_pause_supported(struct be_adapter *adapter);
u32 be_get_fw_log_level(struct be_adapter *adapter);
int be_update_queues(struct be_adapter *adapter);
int be_poll(struct napi_struct *napi, int budget);
void be_eqd_update(struct be_adapter *adapter, bool force_update);
/*
* internal function to initialize-cleanup roce device.
*/
void be_roce_dev_add(struct be_adapter *);
void be_roce_dev_remove(struct be_adapter *);
/*
* internal function to open-close roce device during ifup-ifdown.
*/
void be_roce_dev_shutdown(struct be_adapter *);
#endif /* BE_H */
| null | null | null | null | 94,097 |
14,534 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 179,529 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
#ifndef _ASM_ARCH_THREAD_INFO_H
#define _ASM_ARCH_THREAD_INFO_H
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
__asm__("and.d $sp,%0; ":"=r" (ti) : "0" (~8191UL));
return ti;
}
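/* Editorial note (not part of the original header): the kernel stack here is
 * 8 KiB-aligned with thread_info at its base, so masking $sp with ~8191UL
 * (~0x1FFFUL) rounds it down to that base; e.g. $sp = 0xC0012F40 yields
 * thread_info at 0xC0012000.
 */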
#endif
| null | null | null | null | 87,876 |
2,930 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 167,925 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _NET_BATMAN_ADV_LOG_H_
#define _NET_BATMAN_ADV_LOG_H_
#include "main.h"
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/printk.h>
#ifdef CONFIG_BATMAN_ADV_DEBUG
int batadv_debug_log_setup(struct batadv_priv *bat_priv);
void batadv_debug_log_cleanup(struct batadv_priv *bat_priv);
#else
static inline int batadv_debug_log_setup(struct batadv_priv *bat_priv)
{
return 0;
}
static inline void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
{
}
#endif
/**
* enum batadv_dbg_level - available log levels
* @BATADV_DBG_BATMAN: OGM and TQ computations related messages
* @BATADV_DBG_ROUTES: route added / changed / deleted
* @BATADV_DBG_TT: translation table messages
* @BATADV_DBG_BLA: bridge loop avoidance messages
* @BATADV_DBG_DAT: ARP snooping and DAT related messages
* @BATADV_DBG_NC: network coding related messages
* @BATADV_DBG_MCAST: multicast related messages
* @BATADV_DBG_TP_METER: throughput meter messages
* @BATADV_DBG_ALL: the union of all the above log levels
*/
enum batadv_dbg_level {
BATADV_DBG_BATMAN = BIT(0),
BATADV_DBG_ROUTES = BIT(1),
BATADV_DBG_TT = BIT(2),
BATADV_DBG_BLA = BIT(3),
BATADV_DBG_DAT = BIT(4),
BATADV_DBG_NC = BIT(5),
BATADV_DBG_MCAST = BIT(6),
BATADV_DBG_TP_METER = BIT(7),
BATADV_DBG_ALL = 255,
};
#ifdef CONFIG_BATMAN_ADV_DEBUG
int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
__printf(2, 3);
/* possibly ratelimited debug output */
#define _batadv_dbg(type, bat_priv, ratelimited, fmt, arg...) \
do { \
if (atomic_read(&(bat_priv)->log_level) & (type) && \
(!(ratelimited) || net_ratelimit())) \
batadv_debug_log(bat_priv, fmt, ## arg); \
} \
while (0)
#else /* !CONFIG_BATMAN_ADV_DEBUG */
__printf(4, 5)
static inline void _batadv_dbg(int type __always_unused,
struct batadv_priv *bat_priv __always_unused,
int ratelimited __always_unused,
const char *fmt __always_unused, ...)
{
}
#endif
#define batadv_dbg(type, bat_priv, arg...) \
_batadv_dbg(type, bat_priv, 0, ## arg)
#define batadv_dbg_ratelimited(type, bat_priv, arg...) \
_batadv_dbg(type, bat_priv, 1, ## arg)
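/* Editorial usage sketch (not part of the original header); the format
 * string and 'orig_addr' variable are hypothetical:
 *
 *	batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
 *		   "route to %pM changed\n", orig_addr);
 *
 * The message is printed only when the BATADV_DBG_ROUTES bit is set in
 * bat_priv->log_level; batadv_dbg_ratelimited() additionally drops it when
 * net_ratelimit() reports flooding.
 */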
#define batadv_info(net_dev, fmt, arg...) \
do { \
struct net_device *_netdev = (net_dev); \
struct batadv_priv *_batpriv = netdev_priv(_netdev); \
batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \
pr_info("%s: " fmt, _netdev->name, ## arg); \
} while (0)
#define batadv_err(net_dev, fmt, arg...) \
do { \
struct net_device *_netdev = (net_dev); \
struct batadv_priv *_batpriv = netdev_priv(_netdev); \
batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \
pr_err("%s: " fmt, _netdev->name, ## arg); \
} while (0)
#endif /* _NET_BATMAN_ADV_LOG_H_ */
| null | null | null | null | 76,273 |
8,788 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 173,783 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/*
* Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
* http://armlinux.simtec.co.uk/
* Ben Dooks <ben@simtec.co.uk>
*
* S3C6400 - GPIO lib support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef GPIO_SAMSUNG_S3C64XX_H
#define GPIO_SAMSUNG_S3C64XX_H
#ifdef CONFIG_GPIO_SAMSUNG
/* GPIO bank sizes */
#define S3C64XX_GPIO_A_NR (8)
#define S3C64XX_GPIO_B_NR (7)
#define S3C64XX_GPIO_C_NR (8)
#define S3C64XX_GPIO_D_NR (5)
#define S3C64XX_GPIO_E_NR (5)
#define S3C64XX_GPIO_F_NR (16)
#define S3C64XX_GPIO_G_NR (7)
#define S3C64XX_GPIO_H_NR (10)
#define S3C64XX_GPIO_I_NR (16)
#define S3C64XX_GPIO_J_NR (12)
#define S3C64XX_GPIO_K_NR (16)
#define S3C64XX_GPIO_L_NR (15)
#define S3C64XX_GPIO_M_NR (6)
#define S3C64XX_GPIO_N_NR (16)
#define S3C64XX_GPIO_O_NR (16)
#define S3C64XX_GPIO_P_NR (15)
#define S3C64XX_GPIO_Q_NR (9)
/* GPIO bank numbers */
/* CONFIG_S3C_GPIO_SPACE allows the user to select extra
* space for debugging purposes so that any accidental
* change from one gpio bank to another can be caught.
*/
#define S3C64XX_GPIO_NEXT(__gpio) \
((__gpio##_START) + (__gpio##_NR) + CONFIG_S3C_GPIO_SPACE + 1)
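/* Editorial worked example (not part of the original header), assuming
 * CONFIG_S3C_GPIO_SPACE = 0: S3C64XX_GPIO_B_START =
 * S3C64XX_GPIO_NEXT(S3C64XX_GPIO_A) = 0 + 8 + 0 + 1 = 9, so one spare
 * number is always left between banks and an off-by-one access past
 * S3C64XX_GPA(7) lands on the unused number 8 rather than in bank B.
 */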
enum s3c_gpio_number {
S3C64XX_GPIO_A_START = 0,
S3C64XX_GPIO_B_START = S3C64XX_GPIO_NEXT(S3C64XX_GPIO_A),
S3C64XX_GPIO_C_START = S3C64XX_GPIO_NEXT(S3C64XX_GPIO_B),
S3C64XX_GPIO_D_START = S3C64XX_GPIO_NEXT(S3C64XX_GPIO_C),
S3C64XX_GPIO_E_START = S3C64XX_GPIO_NEXT(S3C64XX_GPIO_D),
S3C64XX_GPIO_F_START = S3C64XX_GPIO_NEXT(S3C64XX_GPIO_E),
S3C64XX_GPIO_G_START = S3C64XX_GPIO_NEXT(S3C64XX_GPIO_F),
S3C64XX_GPIO_H_START = S3C64XX_GPIO_NEXT(S3C64XX_GPIO_G),
S3C64XX_GPIO_I_START = S3C64XX_GPIO_NEXT(S3C64XX_GPIO_H),
S3C64XX_GPIO_J_START = S3C64XX_GPIO_NEXT(S3C64XX_GPIO_I),
S3C64XX_GPIO_K_START = S3C64XX_GPIO_NEXT(S3C64XX_GPIO_J),
S3C64XX_GPIO_L_START = S3C64XX_GPIO_NEXT(S3C64XX_GPIO_K),
S3C64XX_GPIO_M_START = S3C64XX_GPIO_NEXT(S3C64XX_GPIO_L),
S3C64XX_GPIO_N_START = S3C64XX_GPIO_NEXT(S3C64XX_GPIO_M),
S3C64XX_GPIO_O_START = S3C64XX_GPIO_NEXT(S3C64XX_GPIO_N),
S3C64XX_GPIO_P_START = S3C64XX_GPIO_NEXT(S3C64XX_GPIO_O),
S3C64XX_GPIO_Q_START = S3C64XX_GPIO_NEXT(S3C64XX_GPIO_P),
};
/* S3C64XX GPIO number definitions. */
#define S3C64XX_GPA(_nr) (S3C64XX_GPIO_A_START + (_nr))
#define S3C64XX_GPB(_nr) (S3C64XX_GPIO_B_START + (_nr))
#define S3C64XX_GPC(_nr) (S3C64XX_GPIO_C_START + (_nr))
#define S3C64XX_GPD(_nr) (S3C64XX_GPIO_D_START + (_nr))
#define S3C64XX_GPE(_nr) (S3C64XX_GPIO_E_START + (_nr))
#define S3C64XX_GPF(_nr) (S3C64XX_GPIO_F_START + (_nr))
#define S3C64XX_GPG(_nr) (S3C64XX_GPIO_G_START + (_nr))
#define S3C64XX_GPH(_nr) (S3C64XX_GPIO_H_START + (_nr))
#define S3C64XX_GPI(_nr) (S3C64XX_GPIO_I_START + (_nr))
#define S3C64XX_GPJ(_nr) (S3C64XX_GPIO_J_START + (_nr))
#define S3C64XX_GPK(_nr) (S3C64XX_GPIO_K_START + (_nr))
#define S3C64XX_GPL(_nr) (S3C64XX_GPIO_L_START + (_nr))
#define S3C64XX_GPM(_nr) (S3C64XX_GPIO_M_START + (_nr))
#define S3C64XX_GPN(_nr) (S3C64XX_GPIO_N_START + (_nr))
#define S3C64XX_GPO(_nr) (S3C64XX_GPIO_O_START + (_nr))
#define S3C64XX_GPP(_nr) (S3C64XX_GPIO_P_START + (_nr))
#define S3C64XX_GPQ(_nr) (S3C64XX_GPIO_Q_START + (_nr))
/* the end of the S3C64XX specific gpios */
#define S3C64XX_GPIO_END (S3C64XX_GPQ(S3C64XX_GPIO_Q_NR) + 1)
#define S3C_GPIO_END S3C64XX_GPIO_END
/* define the number of gpios we need to the one after the GPQ() range */
#define GPIO_BOARD_START (S3C64XX_GPQ(S3C64XX_GPIO_Q_NR) + 1)
#endif /* GPIO_SAMSUNG */
#endif /* GPIO_SAMSUNG_S3C64XX_H */
| null | null | null | null | 82,130 |
27,057 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 27,057 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
/* ANSI-C code produced by gperf version 3.0.4 */
/* Command-line: gperf --pic -m 100 fcobjshash.gperf */
/* Computed positions: -k'2-3' */
#if !((' ' == 32) && ('!' == 33) && ('"' == 34) && ('#' == 35) \
&& ('%' == 37) && ('&' == 38) && ('\'' == 39) && ('(' == 40) \
&& (')' == 41) && ('*' == 42) && ('+' == 43) && (',' == 44) \
&& ('-' == 45) && ('.' == 46) && ('/' == 47) && ('0' == 48) \
&& ('1' == 49) && ('2' == 50) && ('3' == 51) && ('4' == 52) \
&& ('5' == 53) && ('6' == 54) && ('7' == 55) && ('8' == 56) \
&& ('9' == 57) && (':' == 58) && (';' == 59) && ('<' == 60) \
&& ('=' == 61) && ('>' == 62) && ('?' == 63) && ('A' == 65) \
&& ('B' == 66) && ('C' == 67) && ('D' == 68) && ('E' == 69) \
&& ('F' == 70) && ('G' == 71) && ('H' == 72) && ('I' == 73) \
&& ('J' == 74) && ('K' == 75) && ('L' == 76) && ('M' == 77) \
&& ('N' == 78) && ('O' == 79) && ('P' == 80) && ('Q' == 81) \
&& ('R' == 82) && ('S' == 83) && ('T' == 84) && ('U' == 85) \
&& ('V' == 86) && ('W' == 87) && ('X' == 88) && ('Y' == 89) \
&& ('Z' == 90) && ('[' == 91) && ('\\' == 92) && (']' == 93) \
&& ('^' == 94) && ('_' == 95) && ('a' == 97) && ('b' == 98) \
&& ('c' == 99) && ('d' == 100) && ('e' == 101) && ('f' == 102) \
&& ('g' == 103) && ('h' == 104) && ('i' == 105) && ('j' == 106) \
&& ('k' == 107) && ('l' == 108) && ('m' == 109) && ('n' == 110) \
&& ('o' == 111) && ('p' == 112) && ('q' == 113) && ('r' == 114) \
&& ('s' == 115) && ('t' == 116) && ('u' == 117) && ('v' == 118) \
&& ('w' == 119) && ('x' == 120) && ('y' == 121) && ('z' == 122) \
&& ('{' == 123) && ('|' == 124) && ('}' == 125) && ('~' == 126))
/* The character set is not based on ISO-646. */
#error "gperf generated tables don't work with this execution character set. Please report a bug to <bug-gnu-gperf@gnu.org>."
#endif
#line 1 "fcobjshash.gperf"
#line 13 "fcobjshash.gperf"
struct FcObjectTypeInfo {
int name;
int id;
};
#include <string.h>
/* maximum key range = 56, duplicates = 0 */
#ifdef __GNUC__
__inline
#else
#ifdef __cplusplus
inline
#endif
#endif
static unsigned int
FcObjectTypeHash (register const char *str, register unsigned int len)
{
static const unsigned char asso_values[] =
{
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 21, 30, 3,
36, 45, 60, 3, 15, 0, 60, 60, 0, 9,
9, 0, 21, 60, 0, 0, 15, 0, 60, 60,
0, 15, 24, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60
};
return len + asso_values[(unsigned char)str[2]] + asso_values[(unsigned char)str[1]];
}
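/* Editorial worked example (not part of the generated file): for the key
 * "family", len = 6, asso_values['m'] (str[2]) = 9 and asso_values['a']
 * (str[1]) = 21, giving hash 6 + 9 + 21 = 36, the wordlist slot holding
 * FC_FAMILY_OBJECT below.
 */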
struct FcObjectTypeNamePool_t
{
char FcObjectTypeNamePool_str4[sizeof("file")];
char FcObjectTypeNamePool_str5[sizeof("color")];
char FcObjectTypeNamePool_str7[sizeof("foundry")];
char FcObjectTypeNamePool_str8[sizeof("fullname")];
char FcObjectTypeNamePool_str9[sizeof("pixelsize")];
char FcObjectTypeNamePool_str10[sizeof("prgname")];
char FcObjectTypeNamePool_str12[sizeof("fullnamelang")];
char FcObjectTypeNamePool_str13[sizeof("globaladvance")];
char FcObjectTypeNamePool_str14[sizeof("postscriptname")];
char FcObjectTypeNamePool_str16[sizeof("hinting")];
char FcObjectTypeNamePool_str17[sizeof("minspace")];
char FcObjectTypeNamePool_str18[sizeof("hintstyle")];
char FcObjectTypeNamePool_str19[sizeof("fontformat")];
char FcObjectTypeNamePool_str20[sizeof("fontversion")];
char FcObjectTypeNamePool_str21[sizeof("fontfeatures")];
char FcObjectTypeNamePool_str22[sizeof("outline")];
char FcObjectTypeNamePool_str23[sizeof("autohint")];
char FcObjectTypeNamePool_str24[sizeof("dpi")];
char FcObjectTypeNamePool_str25[sizeof("hash")];
char FcObjectTypeNamePool_str26[sizeof("slant")];
char FcObjectTypeNamePool_str27[sizeof("aspect")];
char FcObjectTypeNamePool_str28[sizeof("size")];
char FcObjectTypeNamePool_str29[sizeof("scale")];
char FcObjectTypeNamePool_str30[sizeof("symbol")];
char FcObjectTypeNamePool_str31[sizeof("rasterizer")];
char FcObjectTypeNamePool_str32[sizeof("scalable")];
char FcObjectTypeNamePool_str33[sizeof("antialias")];
char FcObjectTypeNamePool_str34[sizeof("lang")];
char FcObjectTypeNamePool_str35[sizeof("style")];
char FcObjectTypeNamePool_str36[sizeof("family")];
char FcObjectTypeNamePool_str37[sizeof("rgba")];
char FcObjectTypeNamePool_str38[sizeof("namelang")];
char FcObjectTypeNamePool_str39[sizeof("stylelang")];
char FcObjectTypeNamePool_str40[sizeof("familylang")];
char FcObjectTypeNamePool_str41[sizeof("width")];
char FcObjectTypeNamePool_str42[sizeof("matrix")];
char FcObjectTypeNamePool_str43[sizeof("charset")];
char FcObjectTypeNamePool_str45[sizeof("charwidth")];
char FcObjectTypeNamePool_str46[sizeof("charheight")];
char FcObjectTypeNamePool_str47[sizeof("embolden")];
char FcObjectTypeNamePool_str48[sizeof("lcdfilter")];
char FcObjectTypeNamePool_str49[sizeof("spacing")];
char FcObjectTypeNamePool_str50[sizeof("index")];
char FcObjectTypeNamePool_str51[sizeof("weight")];
char FcObjectTypeNamePool_str52[sizeof("capability")];
char FcObjectTypeNamePool_str53[sizeof("embeddedbitmap")];
char FcObjectTypeNamePool_str58[sizeof("decorative")];
char FcObjectTypeNamePool_str59[sizeof("verticallayout")];
};
static const struct FcObjectTypeNamePool_t FcObjectTypeNamePool_contents =
{
"file",
"color",
"foundry",
"fullname",
"pixelsize",
"prgname",
"fullnamelang",
"globaladvance",
"postscriptname",
"hinting",
"minspace",
"hintstyle",
"fontformat",
"fontversion",
"fontfeatures",
"outline",
"autohint",
"dpi",
"hash",
"slant",
"aspect",
"size",
"scale",
"symbol",
"rasterizer",
"scalable",
"antialias",
"lang",
"style",
"family",
"rgba",
"namelang",
"stylelang",
"familylang",
"width",
"matrix",
"charset",
"charwidth",
"charheight",
"embolden",
"lcdfilter",
"spacing",
"index",
"weight",
"capability",
"embeddedbitmap",
"decorative",
"verticallayout"
};
#define FcObjectTypeNamePool ((const char *) &FcObjectTypeNamePool_contents)
#ifdef __GNUC__
__inline
#if defined __GNUC_STDC_INLINE__ || defined __GNUC_GNU_INLINE__
__attribute__ ((__gnu_inline__))
#endif
#endif
const struct FcObjectTypeInfo *
FcObjectTypeLookup (register const char *str, register unsigned int len)
{
enum
{
TOTAL_KEYWORDS = 48,
MIN_WORD_LENGTH = 3,
MAX_WORD_LENGTH = 14,
MIN_HASH_VALUE = 4,
MAX_HASH_VALUE = 59
};
static const struct FcObjectTypeInfo wordlist[] =
{
{-1}, {-1}, {-1}, {-1},
#line 38 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str4,FC_FILE_OBJECT},
#line 64 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str5,FC_COLOR_OBJECT},
{-1},
#line 31 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str7,FC_FOUNDRY_OBJECT},
#line 22 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str8,FC_FULLNAME_OBJECT},
#line 29 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str9,FC_PIXEL_SIZE_OBJECT},
#line 61 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str10,FC_PRGNAME_OBJECT},
{-1},
#line 23 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str12,FC_FULLNAMELANG_OBJECT},
#line 37 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str13,FC_GLOBAL_ADVANCE_OBJECT},
#line 63 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str14,FC_POSTSCRIPT_NAME_OBJECT},
{-1},
#line 34 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str16,FC_HINTING_OBJECT},
#line 46 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str17,FC_MINSPACE_OBJECT},
#line 33 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str18,FC_HINT_STYLE_OBJECT},
#line 54 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str19,FC_FONTFORMAT_OBJECT},
#line 52 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str20,FC_FONTVERSION_OBJECT},
#line 60 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str21,FC_FONT_FEATURES_OBJECT},
#line 41 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str22,FC_OUTLINE_OBJECT},
#line 36 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str23,FC_AUTOHINT_OBJECT},
#line 43 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str24,FC_DPI_OBJECT},
#line 62 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str25,FC_HASH_OBJECT},
#line 24 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str26,FC_SLANT_OBJECT},
#line 28 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str27,FC_ASPECT_OBJECT},
#line 27 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str28,FC_SIZE_OBJECT},
#line 45 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str29,FC_SCALE_OBJECT},
#line 65 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str30,FC_SYMBOL_OBJECT},
#line 40 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str31,FC_RASTERIZER_OBJECT},
#line 42 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str32,FC_SCALABLE_OBJECT},
#line 32 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str33,FC_ANTIALIAS_OBJECT},
#line 51 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str34,FC_LANG_OBJECT},
#line 20 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str35,FC_STYLE_OBJECT},
#line 18 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str36,FC_FAMILY_OBJECT},
#line 44 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str37,FC_RGBA_OBJECT},
#line 59 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str38,FC_NAMELANG_OBJECT},
#line 21 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str39,FC_STYLELANG_OBJECT},
#line 19 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str40,FC_FAMILYLANG_OBJECT},
#line 26 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str41,FC_WIDTH_OBJECT},
#line 49 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str42,FC_MATRIX_OBJECT},
#line 50 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str43,FC_CHARSET_OBJECT},
{-1},
#line 47 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str45,FC_CHARWIDTH_OBJECT},
#line 48 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str46,FC_CHAR_HEIGHT_OBJECT},
#line 55 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str47,FC_EMBOLDEN_OBJECT},
#line 58 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str48,FC_LCD_FILTER_OBJECT},
#line 30 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str49,FC_SPACING_OBJECT},
#line 39 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str50,FC_INDEX_OBJECT},
#line 25 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str51,FC_WEIGHT_OBJECT},
#line 53 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str52,FC_CAPABILITY_OBJECT},
#line 56 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str53,FC_EMBEDDED_BITMAP_OBJECT},
{-1}, {-1}, {-1}, {-1},
#line 57 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str58,FC_DECORATIVE_OBJECT},
#line 35 "fcobjshash.gperf"
{(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str59,FC_VERTICAL_LAYOUT_OBJECT}
};
if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH)
{
register int key = FcObjectTypeHash (str, len);
if (key <= MAX_HASH_VALUE && key >= 0)
{
register int o = wordlist[key].name;
if (o >= 0)
{
register const char *s = o + FcObjectTypeNamePool;
if (*str == *s && !strcmp (str + 1, s + 1))
return &wordlist[key];
}
}
}
return 0;
}
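/* Editorial usage sketch (not part of the generated file):
 *
 *	const struct FcObjectTypeInfo *info = FcObjectTypeLookup ("family", 6);
 *	// info is non-NULL here and info->id == FC_FAMILY_OBJECT
 */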
| null | null | null | null | 23,920 |
22,891 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 187,886 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/* sunsab.h: Register Definitions for the Siemens SAB82532 DUSCC
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
*/
#ifndef _SUNSAB_H
#define _SUNSAB_H
struct sab82532_async_rd_regs {
u8 rfifo[0x20]; /* Receive FIFO */
u8 star; /* Status Register */
u8 __pad1;
u8 mode; /* Mode Register */
u8 timr; /* Timer Register */
u8 xon; /* XON Character */
u8 xoff; /* XOFF Character */
u8 tcr; /* Termination Character Register */
u8 dafo; /* Data Format */
u8 rfc; /* RFIFO Control Register */
u8 __pad2;
u8 rbcl; /* Receive Byte Count Low */
u8 rbch; /* Receive Byte Count High */
u8 ccr0; /* Channel Configuration Register 0 */
u8 ccr1; /* Channel Configuration Register 1 */
u8 ccr2; /* Channel Configuration Register 2 */
u8 ccr3; /* Channel Configuration Register 3 */
u8 __pad3[4];
u8 vstr; /* Version Status Register */
u8 __pad4[3];
u8 gis; /* Global Interrupt Status */
u8 ipc; /* Interrupt Port Configuration */
u8 isr0; /* Interrupt Status 0 */
u8 isr1; /* Interrupt Status 1 */
u8 pvr; /* Port Value Register */
u8 pis; /* Port Interrupt Status */
u8 pcr; /* Port Configuration Register */
u8 ccr4; /* Channel Configuration Register 4 */
};
struct sab82532_async_wr_regs {
u8 xfifo[0x20]; /* Transmit FIFO */
u8 cmdr; /* Command Register */
u8 __pad1;
u8 mode;
u8 timr;
u8 xon;
u8 xoff;
u8 tcr;
u8 dafo;
u8 rfc;
u8 __pad2;
u8 xbcl; /* Transmit Byte Count Low */
u8 xbch; /* Transmit Byte Count High */
u8 ccr0;
u8 ccr1;
u8 ccr2;
u8 ccr3;
u8 tsax; /* Time-Slot Assignment Reg. Transmit */
u8 tsar; /* Time-Slot Assignment Reg. Receive */
u8 xccr; /* Transmit Channel Capacity Register */
u8 rccr; /* Receive Channel Capacity Register */
u8 bgr; /* Baud Rate Generator Register */
u8 tic; /* Transmit Immediate Character */
u8 mxn; /* Mask XON Character */
u8 mxf; /* Mask XOFF Character */
u8 iva; /* Interrupt Vector Address */
u8 ipc;
u8 imr0; /* Interrupt Mask Register 0 */
u8 imr1; /* Interrupt Mask Register 1 */
u8 pvr;
u8 pim; /* Port Interrupt Mask */
u8 pcr;
u8 ccr4;
};
struct sab82532_async_rw_regs { /* Read/Write registers */
u8 __pad1[0x20];
u8 __pad2;
u8 __pad3;
u8 mode;
u8 timr;
u8 xon;
u8 xoff;
u8 tcr;
u8 dafo;
u8 rfc;
u8 __pad4;
u8 __pad5;
u8 __pad6;
u8 ccr0;
u8 ccr1;
u8 ccr2;
u8 ccr3;
u8 __pad7;
u8 __pad8;
u8 __pad9;
u8 __pad10;
u8 __pad11;
u8 __pad12;
u8 __pad13;
u8 __pad14;
u8 __pad15;
u8 ipc;
u8 __pad16;
u8 __pad17;
u8 pvr;
u8 __pad18;
u8 pcr;
u8 ccr4;
};
union sab82532_async_regs {
__volatile__ struct sab82532_async_rd_regs r;
__volatile__ struct sab82532_async_wr_regs w;
__volatile__ struct sab82532_async_rw_regs rw;
};
union sab82532_irq_status {
unsigned short stat;
struct {
unsigned char isr0;
unsigned char isr1;
} sreg;
};
/* irqflags bits */
#define SAB82532_ALLS 0x00000001
#define SAB82532_XPR 0x00000002
#define SAB82532_REGS_PENDING 0x00000004
/* RFIFO Status Byte */
#define SAB82532_RSTAT_PE 0x80
#define SAB82532_RSTAT_FE 0x40
#define SAB82532_RSTAT_PARITY 0x01
/* Status Register (STAR) */
#define SAB82532_STAR_XDOV 0x80
#define SAB82532_STAR_XFW 0x40
#define SAB82532_STAR_RFNE 0x20
#define SAB82532_STAR_FCS 0x10
#define SAB82532_STAR_TEC 0x08
#define SAB82532_STAR_CEC 0x04
#define SAB82532_STAR_CTS 0x02
/* Command Register (CMDR) */
#define SAB82532_CMDR_RMC 0x80
#define SAB82532_CMDR_RRES 0x40
#define SAB82532_CMDR_RFRD 0x20
#define SAB82532_CMDR_STI 0x10
#define SAB82532_CMDR_XF 0x08
#define SAB82532_CMDR_XRES 0x01
/* Mode Register (MODE) */
#define SAB82532_MODE_FRTS 0x40
#define SAB82532_MODE_FCTS 0x20
#define SAB82532_MODE_FLON 0x10
#define SAB82532_MODE_RAC 0x08
#define SAB82532_MODE_RTS 0x04
#define SAB82532_MODE_TRS 0x02
#define SAB82532_MODE_TLP 0x01
/* Timer Register (TIMR) */
#define SAB82532_TIMR_CNT_MASK 0xe0
#define SAB82532_TIMR_VALUE_MASK 0x1f
/* Data Format (DAFO) */
#define SAB82532_DAFO_XBRK 0x40
#define SAB82532_DAFO_STOP 0x20
#define SAB82532_DAFO_PAR_SPACE 0x00
#define SAB82532_DAFO_PAR_ODD 0x08
#define SAB82532_DAFO_PAR_EVEN 0x10
#define SAB82532_DAFO_PAR_MARK 0x18
#define SAB82532_DAFO_PARE 0x04
#define SAB82532_DAFO_CHL8 0x00
#define SAB82532_DAFO_CHL7 0x01
#define SAB82532_DAFO_CHL6 0x02
#define SAB82532_DAFO_CHL5 0x03
/* RFIFO Control Register (RFC) */
#define SAB82532_RFC_DPS 0x40
#define SAB82532_RFC_DXS 0x20
#define SAB82532_RFC_RFDF 0x10
#define SAB82532_RFC_RFTH_1 0x00
#define SAB82532_RFC_RFTH_4 0x04
#define SAB82532_RFC_RFTH_16 0x08
#define SAB82532_RFC_RFTH_32 0x0c
#define SAB82532_RFC_TCDE 0x01
/* Received Byte Count High (RBCH) */
#define SAB82532_RBCH_DMA 0x80
#define SAB82532_RBCH_CAS 0x20
/* Transmit Byte Count High (XBCH) */
#define SAB82532_XBCH_DMA 0x80
#define SAB82532_XBCH_CAS 0x20
#define SAB82532_XBCH_XC 0x10
/* Channel Configuration Register 0 (CCR0) */
#define SAB82532_CCR0_PU 0x80
#define SAB82532_CCR0_MCE 0x40
#define SAB82532_CCR0_SC_NRZ 0x00
#define SAB82532_CCR0_SC_NRZI 0x08
#define SAB82532_CCR0_SC_FM0 0x10
#define SAB82532_CCR0_SC_FM1 0x14
#define SAB82532_CCR0_SC_MANCH 0x18
#define SAB82532_CCR0_SM_HDLC 0x00
#define SAB82532_CCR0_SM_SDLC_LOOP 0x01
#define SAB82532_CCR0_SM_BISYNC 0x02
#define SAB82532_CCR0_SM_ASYNC 0x03
/* Channel Configuration Register 1 (CCR1) */
#define SAB82532_CCR1_ODS 0x10
#define SAB82532_CCR1_BCR 0x08
#define SAB82532_CCR1_CM_MASK 0x07
/* Channel Configuration Register 2 (CCR2) */
#define SAB82532_CCR2_SOC1 0x80
#define SAB82532_CCR2_SOC0 0x40
#define SAB82532_CCR2_BR9 0x80
#define SAB82532_CCR2_BR8 0x40
#define SAB82532_CCR2_BDF 0x20
#define SAB82532_CCR2_SSEL 0x10
#define SAB82532_CCR2_XCS0 0x20
#define SAB82532_CCR2_RCS0 0x10
#define SAB82532_CCR2_TOE 0x08
#define SAB82532_CCR2_RWX 0x04
#define SAB82532_CCR2_DIV 0x01
/* Channel Configuration Register 3 (CCR3) */
#define SAB82532_CCR3_PSD 0x01
/* Time Slot Assignment Register Transmit (TSAX) */
#define SAB82532_TSAX_TSNX_MASK 0xfc
#define SAB82532_TSAX_XCS2 0x02 /* see also CCR2 */
#define SAB82532_TSAX_XCS1 0x01
/* Time Slot Assignment Register Receive (TSAR) */
#define SAB82532_TSAR_TSNR_MASK 0xfc
#define SAB82532_TSAR_RCS2 0x02 /* see also CCR2 */
#define SAB82532_TSAR_RCS1 0x01
/* Version Status Register (VSTR) */
#define SAB82532_VSTR_CD 0x80
#define SAB82532_VSTR_DPLA 0x40
#define SAB82532_VSTR_VN_MASK 0x0f
#define SAB82532_VSTR_VN_1 0x00
#define SAB82532_VSTR_VN_2 0x01
#define SAB82532_VSTR_VN_3_2 0x02
/* Global Interrupt Status Register (GIS) */
#define SAB82532_GIS_PI 0x80
#define SAB82532_GIS_ISA1 0x08
#define SAB82532_GIS_ISA0 0x04
#define SAB82532_GIS_ISB1 0x02
#define SAB82532_GIS_ISB0 0x01
/* Interrupt Vector Address (IVA) */
#define SAB82532_IVA_MASK 0xf1
/* Interrupt Port Configuration (IPC) */
#define SAB82532_IPC_VIS 0x80
#define SAB82532_IPC_SLA1 0x10
#define SAB82532_IPC_SLA0 0x08
#define SAB82532_IPC_CASM 0x04
#define SAB82532_IPC_IC_OPEN_DRAIN 0x00
#define SAB82532_IPC_IC_ACT_LOW 0x01
#define SAB82532_IPC_IC_ACT_HIGH 0x03
/* Interrupt Status Register 0 (ISR0) */
#define SAB82532_ISR0_TCD 0x80
#define SAB82532_ISR0_TIME 0x40
#define SAB82532_ISR0_PERR 0x20
#define SAB82532_ISR0_FERR 0x10
#define SAB82532_ISR0_PLLA 0x08
#define SAB82532_ISR0_CDSC 0x04
#define SAB82532_ISR0_RFO 0x02
#define SAB82532_ISR0_RPF 0x01
/* Interrupt Status Register 1 (ISR1) */
#define SAB82532_ISR1_BRK 0x80
#define SAB82532_ISR1_BRKT 0x40
#define SAB82532_ISR1_ALLS 0x20
#define SAB82532_ISR1_XOFF 0x10
#define SAB82532_ISR1_TIN 0x08
#define SAB82532_ISR1_CSC 0x04
#define SAB82532_ISR1_XON 0x02
#define SAB82532_ISR1_XPR 0x01
/* Interrupt Mask Register 0 (IMR0) */
#define SAB82532_IMR0_TCD 0x80
#define SAB82532_IMR0_TIME 0x40
#define SAB82532_IMR0_PERR 0x20
#define SAB82532_IMR0_FERR 0x10
#define SAB82532_IMR0_PLLA 0x08
#define SAB82532_IMR0_CDSC 0x04
#define SAB82532_IMR0_RFO 0x02
#define SAB82532_IMR0_RPF 0x01
/* Interrupt Mask Register 1 (IMR1) */
#define SAB82532_IMR1_BRK 0x80
#define SAB82532_IMR1_BRKT 0x40
#define SAB82532_IMR1_ALLS 0x20
#define SAB82532_IMR1_XOFF 0x10
#define SAB82532_IMR1_TIN 0x08
#define SAB82532_IMR1_CSC 0x04
#define SAB82532_IMR1_XON 0x02
#define SAB82532_IMR1_XPR 0x01
/* Port Interrupt Status Register (PIS) */
#define SAB82532_PIS_SYNC_B 0x08
#define SAB82532_PIS_DTR_B 0x04
#define SAB82532_PIS_DTR_A 0x02
#define SAB82532_PIS_SYNC_A 0x01
/* Channel Configuration Register 4 (CCR4) */
#define SAB82532_CCR4_MCK4 0x80
#define SAB82532_CCR4_EBRG 0x40
#define SAB82532_CCR4_TST1 0x20
#define SAB82532_CCR4_ICD 0x10
#endif /* !(_SUNSAB_H) */
| null | null | null | null | 96,233 |
1,627 | null | train_val | 83ed75feba32e46f736fcce0d96a0445f29b96c2 | 163,471 | krb5 | 0 | https://github.com/krb5/krb5 | 2016-01-27 15:43:28-05:00 |
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* kdc/rtest.c */
/*
* Copyright 1991 by the Massachusetts Institute of Technology.
* All Rights Reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*/
#include "k5-int.h"
#include <stdio.h>
#include "kdc_util.h"
#include "extern.h"
void krb5_klog_syslog(void);
static krb5_principal
make_princ(krb5_context ctx, const char *str, const char *prog)
{
krb5_principal ret;
char *dat;
if(!(ret = (krb5_principal) malloc(sizeof(krb5_principal_data)))) {
com_err(prog, ENOMEM, "while allocating principal data");
exit(3);
}
memset(ret, 0, sizeof(krb5_principal_data));
/* We do not include the null... */
if(!(dat = (char *) malloc(strlen(str)))) {
com_err(prog, ENOMEM, "while allocating principal realm data");
exit(3);
}
memcpy(dat, str, strlen(str));
krb5_princ_set_realm_data(ctx, ret, dat);
krb5_princ_set_realm_length(ctx, ret, strlen(str));
return ret;
}
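/*
 * Illustrative invocation (a rough sketch; the realm and principal names
 * are made up, not taken from any test suite):
 *
 *   ./rtest "" krbtgt/B.EXAMPLE@A.EXAMPLE user@A.EXAMPLE host/srv@B.EXAMPLE
 *
 * argv[1] is the old transited encoding (may be empty), argv[2]..argv[4]
 * are the TGS, client and server principals; main() below prints the new
 * transited encoding produced by add_to_transited().
 */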
int
main(int argc, char **argv)
{
krb5_data otrans;
krb5_data ntrans;
krb5_principal tgs, cl, sv;
krb5_error_code kret;
krb5_context ctx;
if (argc < 5) {
fprintf(stderr, "not enough args\n");
exit(1);
}
/* Get a context */
kret = krb5int_init_context_kdc(&ctx);
if (kret) {
com_err(argv[0], kret, "while getting krb5 context");
exit(2);
}
ntrans.length = 0;
ntrans.data = 0;
otrans.length = strlen(argv[1]);
if (otrans.length)
otrans.data = (char *) malloc(otrans.length);
else
otrans.data = 0;
memcpy(otrans.data,argv[1], otrans.length);
tgs = make_princ(ctx, argv[2], argv[0]);
cl = make_princ(ctx, argv[3], argv[0]);
sv = make_princ(ctx, argv[4], argv[0]);
add_to_transited(&otrans,&ntrans,tgs,cl,sv);
printf("%s\n",ntrans.data);
/* Free up all memory so we can profile for leaks */
if (otrans.data)
free(otrans.data);
free(ntrans.data);
krb5_free_principal(ctx, tgs);
krb5_free_principal(ctx, cl);
krb5_free_principal(ctx, sv);
krb5_free_context(ctx);
exit(0);
}
void krb5_klog_syslog(void) {}
kdc_realm_t *
find_realm_data(struct server_handle *handle,
char *rname, krb5_ui_4 rsize)
{
return 0;
}
| null | null | null | null | 74,779 |
56,689 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 56,689 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_CHROMEOS_LOGIN_SCREENS_WRONG_HWID_SCREEN_VIEW_H_
#define CHROME_BROWSER_CHROMEOS_LOGIN_SCREENS_WRONG_HWID_SCREEN_VIEW_H_
#include <string>
#include "chrome/browser/chromeos/login/oobe_screen.h"
namespace chromeos {
// Interface between wrong HWID screen and its representation.
// Note, do not forget to call OnViewDestroyed in the dtor.
class WrongHWIDScreenView {
public:
// Allows us to get info from wrong HWID screen that we need.
class Delegate {
public:
virtual ~Delegate() {}
// Called when screen is exited.
virtual void OnExit() = 0;
// This method is called when the view is being destroyed. Note: if the
// Delegate is destroyed earlier, it has to call SetDelegate(NULL).
virtual void OnViewDestroyed(WrongHWIDScreenView* view) = 0;
};
constexpr static OobeScreen kScreenId = OobeScreen::SCREEN_WRONG_HWID;
virtual ~WrongHWIDScreenView() {}
virtual void Show() = 0;
virtual void Hide() = 0;
virtual void SetDelegate(Delegate* delegate) = 0;
};
} // namespace chromeos
#endif // CHROME_BROWSER_CHROMEOS_LOGIN_SCREENS_WRONG_HWID_SCREEN_VIEW_H_
| null | null | null | null | 53,552 |
14 | 8 | train_val | 26a59d9b46574e457870197dffa802871b4c8fc7 | 256,401 | openssl | 1 | https://github.com/openssl/openssl | null |
static const SSL_METHOD *ssl23_get_client_method(int ver)
{
#ifndef OPENSSL_NO_SSL2
if (ver == SSL2_VERSION)
return(SSLv2_client_method());
#endif
if (ver == SSL3_VERSION)
return(SSLv3_client_method());
else if (ver == TLS1_VERSION)
return(TLSv1_client_method());
else if (ver == TLS1_1_VERSION)
return(TLSv1_1_client_method());
else
return(NULL);
}
| CVE-2014-3568 | CWE-310 | https://git.openssl.org/gitweb/?p=openssl.git;a=commit;h=26a59d9b46574e457870197dffa802871b4c8fc7 | Medium | 4,084 |
39,215 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 204,210 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/*
* Dynamic byte queue limits. See include/linux/dynamic_queue_limits.h
*
* Copyright (c) 2011, Tom Herbert <therbert@google.com>
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/compiler.h>
#include <linux/export.h>
#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
/* Records completed count and recalculates the queue limit */
void dql_completed(struct dql *dql, unsigned int count)
{
unsigned int inprogress, prev_inprogress, limit;
unsigned int ovlimit, completed, num_queued;
bool all_prev_completed;
num_queued = ACCESS_ONCE(dql->num_queued);
/* Can't complete more than what's in queue */
BUG_ON(count > num_queued - dql->num_completed);
completed = dql->num_completed + count;
limit = dql->limit;
ovlimit = POSDIFF(num_queued - dql->num_completed, limit);
inprogress = num_queued - completed;
prev_inprogress = dql->prev_num_queued - dql->num_completed;
all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued);
if ((ovlimit && !inprogress) ||
(dql->prev_ovlimit && all_prev_completed)) {
/*
* Queue considered starved if:
* - The queue was over-limit in the last interval,
* and there is no more data in the queue.
* OR
* - The queue was over-limit in the previous interval and
* when enqueuing it was possible that all queued data
* had been consumed. This covers the case when the queue
* may have become starved between completion processing
* running and the next time enqueue was scheduled.
*
* When queue is starved increase the limit by the amount
* of bytes both sent and completed in the last interval,
* plus any previous over-limit.
*/
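/*
 * Worked example with illustrative numbers (not taken from any
 * driver): if the previous pass left prev_ovlimit = 1000 bytes and
 * completed now exceeds prev_num_queued by 3000 bytes, the limit
 * grows by 3000 + 1000 = 4000 bytes and slack tracking restarts.
 */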
limit += POSDIFF(completed, dql->prev_num_queued) +
dql->prev_ovlimit;
dql->slack_start_time = jiffies;
dql->lowest_slack = UINT_MAX;
} else if (inprogress && prev_inprogress && !all_prev_completed) {
/*
* Queue was not starved, check if the limit can be decreased.
* A decrease is only considered if the queue has been busy in
* the whole interval (the check above).
*
* If there is slack, the amount of excess data queued above
* the amount needed to prevent starvation, the queue limit
* can be decreased. To avoid hysteresis we consider the
* minimum amount of slack found over several iterations of the
* completion routine.
*/
unsigned int slack, slack_last_objs;
/*
* Slack is the maximum of
* - The queue limit plus previous over-limit minus twice
* the number of objects completed. Note that twice the
* number of completed bytes is a basis for an upper bound
* on the limit.
* - Portion of objects in the last queuing operation that
* was not part of non-zero previous over-limit. That is
* "round down" by non-overlimit portion of the last
* queueing operation.
*/
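/*
 * Illustrative numbers (not from any real workload): with
 * limit = 10000, prev_ovlimit = 0 and 3000 bytes completed in this
 * pass, slack = max(10000 - 2 * 3000, 0) = 4000, so after
 * slack_hold_time the limit could drop by up to 4000 bytes.
 */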
slack = POSDIFF(limit + dql->prev_ovlimit,
2 * (completed - dql->num_completed));
slack_last_objs = dql->prev_ovlimit ?
POSDIFF(dql->prev_last_obj_cnt, dql->prev_ovlimit) : 0;
slack = max(slack, slack_last_objs);
if (slack < dql->lowest_slack)
dql->lowest_slack = slack;
if (time_after(jiffies,
dql->slack_start_time + dql->slack_hold_time)) {
limit = POSDIFF(limit, dql->lowest_slack);
dql->slack_start_time = jiffies;
dql->lowest_slack = UINT_MAX;
}
}
/* Enforce bounds on limit */
limit = clamp(limit, dql->min_limit, dql->max_limit);
if (limit != dql->limit) {
dql->limit = limit;
ovlimit = 0;
}
dql->adj_limit = limit + completed;
dql->prev_ovlimit = ovlimit;
dql->prev_last_obj_cnt = dql->last_obj_cnt;
dql->num_completed = completed;
dql->prev_num_queued = num_queued;
}
EXPORT_SYMBOL(dql_completed);
void dql_reset(struct dql *dql)
{
/* Reset all dynamic values */
dql->limit = 0;
dql->num_queued = 0;
dql->num_completed = 0;
dql->last_obj_cnt = 0;
dql->prev_num_queued = 0;
dql->prev_last_obj_cnt = 0;
dql->prev_ovlimit = 0;
dql->lowest_slack = UINT_MAX;
dql->slack_start_time = jiffies;
}
EXPORT_SYMBOL(dql_reset);
int dql_init(struct dql *dql, unsigned hold_time)
{
dql->max_limit = DQL_MAX_LIMIT;
dql->min_limit = 0;
dql->slack_hold_time = hold_time;
dql_reset(dql);
return 0;
}
EXPORT_SYMBOL(dql_init);
| null | null | null | null | 112,557 |
38,765 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 203,760 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <signal.h>
#include <linux/bpf.h>
#include <string.h>
#include "libbpf.h"
#include "bpf_load.h"
#include "bpf_util.h"
#define MAX_INDEX 64
#define MAX_STARS 38
static void stars(char *str, long val, long max, int width)
{
int i;
for (i = 0; i < (width * val / max) - 1 && i < width - 1; i++)
str[i] = '*';
if (val > max)
str[i - 1] = '+';
str[i] = '\0';
}
struct task {
char comm[16];
__u64 pid_tgid;
__u64 uid_gid;
};
struct hist_key {
struct task t;
__u32 index;
};
#define SIZE sizeof(struct task)
static void print_hist_for_pid(int fd, void *task)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
struct hist_key key = {}, next_key;
long values[nr_cpus];
char starstr[MAX_STARS];
long value;
long data[MAX_INDEX] = {};
int max_ind = -1;
long max_value = 0;
int i, ind;
while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
if (memcmp(&next_key, task, SIZE)) {
key = next_key;
continue;
}
bpf_map_lookup_elem(fd, &next_key, values);
value = 0;
for (i = 0; i < nr_cpus; i++)
value += values[i];
ind = next_key.index;
data[ind] = value;
if (value && ind > max_ind)
max_ind = ind;
if (value > max_value)
max_value = value;
key = next_key;
}
printf(" syscall write() stats\n");
printf(" byte_size : count distribution\n");
for (i = 1; i <= max_ind + 1; i++) {
stars(starstr, data[i - 1], max_value, MAX_STARS);
printf("%8ld -> %-8ld : %-8ld |%-*s|\n",
(1l << i) >> 1, (1l << i) - 1, data[i - 1],
MAX_STARS, starstr);
}
}
static void print_hist(int fd)
{
struct hist_key key = {}, next_key;
static struct task tasks[1024];
int task_cnt = 0;
int i;
while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
int found = 0;
for (i = 0; i < task_cnt; i++)
if (memcmp(&tasks[i], &next_key, SIZE) == 0)
found = 1;
if (!found)
memcpy(&tasks[task_cnt++], &next_key, SIZE);
key = next_key;
}
for (i = 0; i < task_cnt; i++) {
printf("\npid %d cmd %s uid %d\n",
(__u32) tasks[i].pid_tgid,
tasks[i].comm,
(__u32) tasks[i].uid_gid);
print_hist_for_pid(fd, &tasks[i]);
}
}
static void int_exit(int sig)
{
print_hist(map_fd[1]);
exit(0);
}
int main(int ac, char **argv)
{
char filename[256];
long key, next_key, value;
FILE *f;
int i;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
signal(SIGINT, int_exit);
/* start 'ping' in the background to have some kfree_skb events */
f = popen("ping -c5 localhost", "r");
(void) f;
/* start 'dd' in the background to have plenty of 'write' syscalls */
f = popen("dd if=/dev/zero of=/dev/null count=5000000", "r");
(void) f;
if (load_bpf_file(filename)) {
printf("%s", bpf_log_buf);
return 1;
}
for (i = 0; i < 5; i++) {
key = 0;
while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0) {
bpf_map_lookup_elem(map_fd[0], &next_key, &value);
printf("location 0x%lx count %ld\n", next_key, value);
key = next_key;
}
if (key)
printf("\n");
sleep(1);
}
print_hist(map_fd[1]);
return 0;
}
| null | null | null | null | 112,107 |
5,434 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 5,434 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ios/chrome/browser/passwords/credential_manager_features.h"
namespace features {
const base::Feature kCredentialManager{"CredentialManager",
base::FEATURE_DISABLED_BY_DEFAULT};
} // namespace features
| null | null | null | null | 2,297 |
13,350 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 13,350 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/rlz/rlz_tracker.h"
namespace rlz {
// static
rlz_lib::AccessPoint RLZTracker::ChromeOmnibox() {
return rlz_lib::CHROME_MAC_OMNIBOX;
}
// static
rlz_lib::AccessPoint RLZTracker::ChromeHomePage() {
return rlz_lib::CHROME_MAC_HOME_PAGE;
}
// static
rlz_lib::AccessPoint RLZTracker::ChromeAppList() {
return rlz_lib::CHROME_MAC_APP_LIST;
}
} // namespace rlz
| null | null | null | null | 10,213 |
31,823 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 31,823 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/core/css/properties/shorthands/webkit_text_stroke.h"
#include "third_party/blink/renderer/core/css/parser/css_property_parser_helpers.h"
#include "third_party/blink/renderer/core/style_property_shorthand.h"
namespace blink {
namespace CSSShorthand {
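// For context, a rough sketch of the expansion (not code from this file):
// -webkit-text-stroke is a shorthand for the -webkit-text-stroke-width and
// -webkit-text-stroke-color longhands, so e.g. "-webkit-text-stroke: 2px red"
// sets both; ConsumeShorthandGreedilyViaLonghands below consumes the values
// in either order and assigns each one to its matching longhand.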
bool WebkitTextStroke::ParseShorthand(
bool important,
CSSParserTokenRange& range,
const CSSParserContext& context,
const CSSParserLocalContext&,
HeapVector<CSSPropertyValue, 256>& properties) const {
return CSSPropertyParserHelpers::ConsumeShorthandGreedilyViaLonghands(
webkitTextStrokeShorthand(), important, context, range, properties);
}
} // namespace CSSShorthand
} // namespace blink
| null | null | null | null | 28,686 |
29,677 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 194,672 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/*
* USB device quirk handling logic and table
*
* Copyright (c) 2007 Oliver Neukum
* Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation, version 2.
*
*
*/
#include <linux/usb.h>
#include <linux/usb/quirks.h>
#include <linux/usb/hcd.h>
#include "usb.h"
/* Lists of quirky USB devices, split in device quirks and interface quirks.
* Device quirks are applied at the very beginning of the enumeration process,
* right after reading the device descriptor. They can thus only match on device
* information.
*
* Interface quirks are applied after reading all the configuration descriptors.
* They can match on both device and interface information.
*
* Note that the DELAY_INIT and HONOR_BNUMINTERFACES quirks do not make sense as
* interface quirks, as they only influence the enumeration process which is run
* before processing the interface quirks.
*
* Please keep the lists ordered by:
* 1) Vendor ID
* 2) Product ID
* 3) Class ID
*/
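/*
 * A hypothetical new entry (the vendor/product IDs below are made up
 * purely for illustration) would be added in vendor-ID order, e.g.:
 *
 *	{ USB_DEVICE(0x1234, 0x5678), .driver_info = USB_QUIRK_RESET_RESUME },
 */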
static const struct usb_device_id usb_quirk_list[] = {
/* CBM - Flash disk */
{ USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
/* WORLDE easy key (easykey.25) MIDI controller */
{ USB_DEVICE(0x0218, 0x0401), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
/* HP 5300/5370C scanner */
{ USB_DEVICE(0x03f0, 0x0701), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
/* Creative SB Audigy 2 NX */
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
/* USB3503 */
{ USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
/* Microsoft Wireless Laser Mouse 6000 Receiver */
{ USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech HD Pro Webcams C920 and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
/* Logitech ConferenceCam CC3000e */
{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT },
/* Logitech PTZ Pro Camera */
{ USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
/* Logitech Quickcam Fusion */
{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Quickcam Orbit MP */
{ USB_DEVICE(0x046d, 0x08c2), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Quickcam Pro for Notebook */
{ USB_DEVICE(0x046d, 0x08c3), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Quickcam Pro 5000 */
{ USB_DEVICE(0x046d, 0x08c5), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Quickcam OEM Dell Notebook */
{ USB_DEVICE(0x046d, 0x08c6), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Quickcam OEM Cisco VT Camera II */
{ USB_DEVICE(0x046d, 0x08c7), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Harmony 700-series */
{ USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT },
/* Philips PSC805 audio device */
{ USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
/* Plantronic Audio 655 DSP */
{ USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME },
/* Plantronic Audio 648 USB */
{ USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME },
/* Artisman Watchdog Dongle */
{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
/* Microchip Joss Optical infrared touchboard device */
{ USB_DEVICE(0x04d8, 0x000c), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
/* CarrolTouch 4000U */
{ USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME },
/* CarrolTouch 4500U */
{ USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME },
/* Samsung Android phone modem - ID conflict with SPH-I500 */
{ USB_DEVICE(0x04e8, 0x6601), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
/* Elan Touchscreen */
{ USB_DEVICE(0x04f3, 0x0089), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
{ USB_DEVICE(0x04f3, 0x009b), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
{ USB_DEVICE(0x04f3, 0x010c), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
{ USB_DEVICE(0x04f3, 0x0125), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
{ USB_DEVICE(0x04f3, 0x016f), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
{ USB_DEVICE(0x04f3, 0x0381), .driver_info =
USB_QUIRK_NO_LPM },
{ USB_DEVICE(0x04f3, 0x21b8), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
/* Roland SC-8820 */
{ USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
/* Edirol SD-20 */
{ USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME },
/* Alcor Micro Corp. Hub */
{ USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
/* appletouch */
{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
/* Avision AV600U */
{ USB_DEVICE(0x0638, 0x0a13), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
/* Saitek Cyborg Gold Joystick */
{ USB_DEVICE(0x06a3, 0x0006), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
/* Guillemot Webcam Hercules Dualpix Exchange (2nd ID) */
{ USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
/* Guillemot Webcam Hercules Dualpix Exchange */
{ USB_DEVICE(0x06f8, 0x3005), .driver_info = USB_QUIRK_RESET_RESUME },
/* Midiman M-Audio Keystation 88es */
{ USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME },
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
/* Baum Vario Ultra */
{ USB_DEVICE(0x0904, 0x6101), .driver_info =
USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
{ USB_DEVICE(0x0904, 0x6102), .driver_info =
USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
{ USB_DEVICE(0x0904, 0x6103), .driver_info =
USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
/* Keytouch QWERTY Panel keyboard */
{ USB_DEVICE(0x0926, 0x3333), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
/* Broadcom BCM92035DGROM BT dongle */
{ USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
/* MAYA44USB sound device */
{ USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
/* ASUS Base Station(T100) */
{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
/* Action Semiconductor flash disk */
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
/* SKYMEDI USB_DRIVE */
{ USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME },
/* Razer - Razer Blade Keyboard */
{ USB_DEVICE(0x1532, 0x0116), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
/* BUILDWIN Photo Frame */
{ USB_DEVICE(0x1908, 0x1315), .driver_info =
USB_QUIRK_HONOR_BNUMINTERFACES },
/* Protocol and OTG Electrical Test Device */
{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
/* Acer C120 LED Projector */
{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
/* Blackmagic Design Intensity Shuttle */
{ USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
/* Blackmagic Design UltraStudio SDI */
{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
{ } /* terminating entry must be last */
};
static const struct usb_device_id usb_interface_quirk_list[] = {
/* Logitech UVC Cameras */
{ USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
.driver_info = USB_QUIRK_RESET_RESUME },
{ } /* terminating entry must be last */
};
static const struct usb_device_id usb_amd_resume_quirk_list[] = {
/* Lenovo Mouse with Pixart controller */
{ USB_DEVICE(0x17ef, 0x602e), .driver_info = USB_QUIRK_RESET_RESUME },
/* Pixart Mouse */
{ USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Optical Mouse M90/M100 */
{ USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
{ } /* terminating entry must be last */
};
static bool usb_match_any_interface(struct usb_device *udev,
const struct usb_device_id *id)
{
unsigned int i;
for (i = 0; i < udev->descriptor.bNumConfigurations; ++i) {
struct usb_host_config *cfg = &udev->config[i];
unsigned int j;
for (j = 0; j < cfg->desc.bNumInterfaces; ++j) {
struct usb_interface_cache *cache;
struct usb_host_interface *intf;
cache = cfg->intf_cache[j];
if (cache->num_altsetting == 0)
continue;
intf = &cache->altsetting[0];
if (usb_match_one_id_intf(udev, intf, id))
return true;
}
}
return false;
}
static int usb_amd_resume_quirk(struct usb_device *udev)
{
struct usb_hcd *hcd;
hcd = bus_to_hcd(udev->bus);
/* The device should be attached directly to root hub */
if (udev->level == 1 && hcd->amd_resume_bug == 1)
return 1;
return 0;
}
static u32 __usb_detect_quirks(struct usb_device *udev,
const struct usb_device_id *id)
{
u32 quirks = 0;
for (; id->match_flags; id++) {
if (!usb_match_device(udev, id))
continue;
if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_INFO) &&
!usb_match_any_interface(udev, id))
continue;
quirks |= (u32)(id->driver_info);
}
return quirks;
}
/*
* Detect any quirks the device has, and do any housekeeping for it if needed.
*/
void usb_detect_quirks(struct usb_device *udev)
{
udev->quirks = __usb_detect_quirks(udev, usb_quirk_list);
/*
* Pixart-based mice would trigger a remote wakeup issue on the AMD
* Yangtze chipset, so set the RESET_RESUME flag for them.
*/
if (usb_amd_resume_quirk(udev))
udev->quirks |= __usb_detect_quirks(udev,
usb_amd_resume_quirk_list);
if (udev->quirks)
dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
udev->quirks);
#ifdef CONFIG_USB_DEFAULT_PERSIST
if (!(udev->quirks & USB_QUIRK_RESET))
udev->persist_enabled = 1;
#else
/* Hubs are automatically enabled for USB-PERSIST */
if (udev->descriptor.bDeviceClass == USB_CLASS_HUB)
udev->persist_enabled = 1;
#endif /* CONFIG_USB_DEFAULT_PERSIST */
}
void usb_detect_interface_quirks(struct usb_device *udev)
{
u32 quirks;
quirks = __usb_detect_quirks(udev, usb_interface_quirk_list);
if (quirks == 0)
return;
dev_dbg(&udev->dev, "USB interface quirks for this device: %x\n",
quirks);
udev->quirks |= quirks;
}
| null | null | null | null | 103,019 |
16,922 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 16,922 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_VIZ_SERVICE_DISPLAY_DISPLAY_H_
#define COMPONENTS_VIZ_SERVICE_DISPLAY_DISPLAY_H_
#include <memory>
#include <vector>
#include "base/macros.h"
#include "base/observer_list.h"
#include "cc/resources/display_resource_provider.h"
#include "components/viz/common/frame_sinks/begin_frame_source.h"
#include "components/viz/common/gpu/context_lost_observer.h"
#include "components/viz/common/resources/returned_resource.h"
#include "components/viz/common/surfaces/frame_sink_id.h"
#include "components/viz/common/surfaces/surface_id.h"
#include "components/viz/service/display/display_scheduler.h"
#include "components/viz/service/display/output_surface_client.h"
#include "components/viz/service/display/surface_aggregator.h"
#include "components/viz/service/surfaces/latest_local_surface_id_lookup_delegate.h"
#include "components/viz/service/surfaces/surface_manager.h"
#include "components/viz/service/viz_service_export.h"
#include "gpu/command_buffer/common/texture_in_use_response.h"
#include "ui/gfx/color_space.h"
#include "ui/latency/latency_info.h"
namespace cc {
class DisplayResourceProvider;
class RendererSettings;
} // namespace cc
namespace gfx {
class Size;
}
namespace viz {
class DirectRenderer;
class DisplayClient;
class OutputSurface;
class SharedBitmapManager;
class SoftwareRenderer;
class VIZ_SERVICE_EXPORT DisplayObserver {
public:
virtual ~DisplayObserver() {}
virtual void OnDisplayDidFinishFrame(const BeginFrameAck& ack) = 0;
};
// A Display produces a surface that can be used to draw to a physical display
// (OutputSurface). The client is responsible for creating and sizing the
// surface IDs used to draw into the display and deciding when to draw.
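//
// A minimal usage sketch (the embedder-side objects named below are
// placeholders, not names from this codebase):
//
//   display->Initialize(my_client, my_surface_manager);
//   display->SetLocalSurfaceId(local_surface_id, device_scale_factor);
//   display->Resize(pixel_size);
//   display->SetVisible(true);
//   // Without a DisplayScheduler, the embedder calls DrawAndSwap() itself.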
class VIZ_SERVICE_EXPORT Display : public DisplaySchedulerClient,
public OutputSurfaceClient,
public ContextLostObserver,
public LatestLocalSurfaceIdLookupDelegate {
public:
// The |begin_frame_source| and |scheduler| may be null (together). In that
// case, DrawAndSwap must be called externally when needed.
// The |current_task_runner| may be null if the Display is on a thread without
// a MessageLoop.
Display(SharedBitmapManager* bitmap_manager,
const RendererSettings& settings,
const FrameSinkId& frame_sink_id,
std::unique_ptr<OutputSurface> output_surface,
std::unique_ptr<DisplayScheduler> scheduler,
scoped_refptr<base::SingleThreadTaskRunner> current_task_runner);
~Display() override;
void Initialize(DisplayClient* client, SurfaceManager* surface_manager);
void AddObserver(DisplayObserver* observer);
void RemoveObserver(DisplayObserver* observer);
// device_scale_factor is used to communicate to the external window system
// what scale this was rendered at.
void SetLocalSurfaceId(const LocalSurfaceId& id, float device_scale_factor);
void SetVisible(bool visible);
void Resize(const gfx::Size& new_size);
// Sets the color matrix that will be used to transform the output of this
// display. This is only supported for GPU compositing.
void SetColorMatrix(const SkMatrix44& matrix);
void SetColorSpace(const gfx::ColorSpace& blending_color_space,
const gfx::ColorSpace& device_color_space);
void SetOutputIsSecure(bool secure);
const SurfaceId& CurrentSurfaceId();
// DisplaySchedulerClient implementation.
bool DrawAndSwap() override;
bool SurfaceHasUndrawnFrame(const SurfaceId& surface_id) const override;
bool SurfaceDamaged(const SurfaceId& surface_id,
const BeginFrameAck& ack) override;
void SurfaceDiscarded(const SurfaceId& surface_id) override;
void DidFinishFrame(const BeginFrameAck& ack) override;
// OutputSurfaceClient implementation.
void SetNeedsRedrawRect(const gfx::Rect& damage_rect) override;
void DidReceiveSwapBuffersAck(uint64_t swap_id) override;
void DidReceiveTextureInUseResponses(
const gpu::TextureInUseResponses& responses) override;
void DidReceiveCALayerParams(
const gfx::CALayerParams& ca_layer_params) override;
void DidReceivePresentationFeedback(
uint64_t swap_id,
const gfx::PresentationFeedback& feedback) override;
// LatestLocalSurfaceIdLookupDelegate implementation.
LocalSurfaceId GetSurfaceAtAggregation(
const FrameSinkId& frame_sink_id) const override;
bool has_scheduler() const { return !!scheduler_; }
DirectRenderer* renderer_for_testing() const { return renderer_.get(); }
void ForceImmediateDrawAndSwapIfPossible();
void SetNeedsOneBeginFrame();
void RemoveOverdrawQuads(CompositorFrame* frame);
private:
void InitializeRenderer();
void UpdateRootSurfaceResourcesLocked();
// ContextLostObserver implementation.
void OnContextLost() override;
SharedBitmapManager* const bitmap_manager_;
const RendererSettings settings_;
DisplayClient* client_ = nullptr;
base::ObserverList<DisplayObserver> observers_;
SurfaceManager* surface_manager_ = nullptr;
const FrameSinkId frame_sink_id_;
SurfaceId current_surface_id_;
gfx::Size current_surface_size_;
float device_scale_factor_ = 1.f;
gfx::ColorSpace blending_color_space_ = gfx::ColorSpace::CreateSRGB();
gfx::ColorSpace device_color_space_ = gfx::ColorSpace::CreateSRGB();
bool visible_ = false;
bool swapped_since_resize_ = false;
bool output_is_secure_ = false;
std::unique_ptr<OutputSurface> output_surface_;
std::unique_ptr<DisplayScheduler> scheduler_;
std::unique_ptr<cc::DisplayResourceProvider> resource_provider_;
std::unique_ptr<SurfaceAggregator> aggregator_;
// This may be null if the Display is on a thread without a MessageLoop.
scoped_refptr<base::SingleThreadTaskRunner> current_task_runner_;
std::unique_ptr<DirectRenderer> renderer_;
SoftwareRenderer* software_renderer_ = nullptr;
std::vector<ui::LatencyInfo> stored_latency_info_;
using PresentedCallbacks = std::vector<Surface::PresentedCallback>;
PresentedCallbacks presented_callbacks_;
PresentedCallbacks active_presented_callbacks_;
// TODO(penghuang): Remove it when we can get accurate presentation time from
// GPU for every SwapBuffers. https://crbug.com/776877
std::vector<PresentedCallbacks> previous_presented_callbacks_;
private:
DISALLOW_COPY_AND_ASSIGN(Display);
};
} // namespace viz
#endif // COMPONENTS_VIZ_SERVICE_DISPLAY_DISPLAY_H_
| null | null | null | null | 13,785 |
17,547 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 182,542 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/*
* arch/xtensa/mm/init.c
*
* Derived from MIPS, PPC.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
* Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
* Marc Gauthier
* Kevin Chea
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
#include <linux/dma-contiguous.h>
#include <asm/bootparam.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/sysmem.h>
/*
* Initialize the bootmem system and give it all low memory we have available.
*/
void __init bootmem_init(void)
{
/* Reserve all memory below PHYS_OFFSET, as memory
* accounting doesn't work for pages below that address.
*
* If PHYS_OFFSET is zero, reserve the page at address 0:
* successful allocations should never return NULL.
*/
if (PHYS_OFFSET)
memblock_reserve(0, PHYS_OFFSET);
else
memblock_reserve(0, 1);
early_init_fdt_scan_reserved_mem();
if (!memblock_phys_mem_size())
panic("No memory found!\n");
min_low_pfn = PFN_UP(memblock_start_of_DRAM());
min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
max_pfn = PFN_DOWN(memblock_end_of_DRAM());
max_low_pfn = min(max_pfn, MAX_LOW_PFN);
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
memblock_dump_all();
}
void __init zones_init(void)
{
/* All pages are DMA-able, so we put them all in the DMA zone. */
unsigned long zones_size[MAX_NR_ZONES] = {
[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET,
#ifdef CONFIG_HIGHMEM
[ZONE_HIGHMEM] = max_pfn - max_low_pfn,
#endif
};
free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
}
/*
* Initialize memory pages.
*/
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
unsigned long tmp;
reset_all_zones_managed_pages();
for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
free_highmem_page(pfn_to_page(tmp));
#endif
max_mapnr = max_pfn - ARCH_PFN_OFFSET;
high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
free_all_bootmem();
mem_init_print_info(NULL);
pr_info("virtual kernel memory layout:\n"
#ifdef CONFIG_HIGHMEM
" pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
" fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
#endif
#ifdef CONFIG_MMU
" vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n"
#endif
" lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n",
#ifdef CONFIG_HIGHMEM
PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
(LAST_PKMAP*PAGE_SIZE) >> 10,
FIXADDR_START, FIXADDR_TOP,
(FIXADDR_TOP - FIXADDR_START) >> 10,
#endif
#ifdef CONFIG_MMU
VMALLOC_START, VMALLOC_END,
(VMALLOC_END - VMALLOC_START) >> 20,
PAGE_OFFSET, PAGE_OFFSET +
(max_low_pfn - min_low_pfn) * PAGE_SIZE,
#else
min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE,
#endif
((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20);
}
#ifdef CONFIG_BLK_DEV_INITRD
extern int initrd_is_mapped;
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (initrd_is_mapped)
free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
void free_initmem(void)
{
free_initmem_default(-1);
}
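/*
 * Rough examples of the "memmap=" syntax handled below (addresses and
 * sizes are illustrative only):
 *   memmap=64M@0x20000000   add 64 MiB of RAM at 0x20000000
 *   memmap=16M$0x30000000   reserve 16 MiB at 0x30000000
 *   memmap=128M             reserve everything above the first 128 MiB
 */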
static void __init parse_memmap_one(char *p)
{
char *oldp;
unsigned long start_at, mem_size;
if (!p)
return;
oldp = p;
mem_size = memparse(p, &p);
if (p == oldp)
return;
switch (*p) {
case '@':
start_at = memparse(p + 1, &p);
memblock_add(start_at, mem_size);
break;
case '$':
start_at = memparse(p + 1, &p);
memblock_reserve(start_at, mem_size);
break;
case 0:
memblock_reserve(mem_size, -mem_size);
break;
default:
pr_warn("Unrecognized memmap syntax: %s\n", p);
break;
}
}
static int __init parse_memmap_opt(char *str)
{
while (str) {
char *k = strchr(str, ',');
if (k)
*k++ = 0;
parse_memmap_one(str);
str = k;
}
return 0;
}
early_param("memmap", parse_memmap_opt);
| null | null | null | null | 90,889 |
52,476 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 52,476 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stdint.h>
#include <string>
#include "media/base/mock_media_log.h"
#include "media/formats/mp4/aac.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::AllOf;
using ::testing::HasSubstr;
using ::testing::InSequence;
using ::testing::StrictMock;
namespace media {
namespace mp4 {
MATCHER_P(UnsupportedFrequencyIndexLog, frequency_index, "") {
return CONTAINS_STRING(
arg,
"Sampling Frequency Index(0x" +
std::string(frequency_index) + ") is not supported.");
}
MATCHER_P(UnsupportedExtensionFrequencyIndexLog, frequency_index, "") {
return CONTAINS_STRING(
arg,
"Extension Sampling Frequency Index(0x" +
std::string(frequency_index) + ") is not supported.");
}
MATCHER_P(UnsupportedChannelConfigLog, channel_index, "") {
return CONTAINS_STRING(
arg,
"Channel Configuration(" + std::string(channel_index) +
") is not supported");
}
MATCHER_P(UnsupportedAudioProfileLog, profile_string, "") {
return CONTAINS_STRING(
arg,
"Audio codec(" + std::string(profile_string) + ") is not supported");
}
class AACTest : public testing::Test {
public:
AACTest() = default;
bool Parse(const std::vector<uint8_t>& data) {
return aac_.Parse(data, &media_log_);
}
StrictMock<MockMediaLog> media_log_;
AAC aac_;
};
TEST_F(AACTest, BasicProfileTest) {
uint8_t buffer[] = {0x12, 0x10};
std::vector<uint8_t> data;
data.assign(buffer, buffer + sizeof(buffer));
EXPECT_TRUE(Parse(data));
EXPECT_EQ(aac_.GetOutputSamplesPerSecond(false), 44100);
EXPECT_EQ(aac_.GetChannelLayout(false), CHANNEL_LAYOUT_STEREO);
}
TEST_F(AACTest, ExtensionTest) {
uint8_t buffer[] = {0x13, 0x08, 0x56, 0xe5, 0x9d, 0x48, 0x80};
std::vector<uint8_t> data;
data.assign(buffer, buffer + sizeof(buffer));
EXPECT_TRUE(Parse(data));
EXPECT_EQ(aac_.GetOutputSamplesPerSecond(false), 48000);
EXPECT_EQ(aac_.GetOutputSamplesPerSecond(true), 48000);
EXPECT_EQ(aac_.GetChannelLayout(false), CHANNEL_LAYOUT_STEREO);
}
// Test implicit SBR with mono channel config.
// Mono channel layout should only be reported if SBR is not
// specified. Otherwise stereo should be reported.
// See ISO 14496-3:2005 Section 1.6.5.3 for details about this special casing.
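// For reference, a rough decode of the {0x13, 0x08} bytes used below: the
// first five bits give audio object type 2 (AAC-LC) and the next four bits
// give sampling frequency index 6 (24000 Hz), which is why the non-SBR
// expectation below is 24000 Hz.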
TEST_F(AACTest, ImplicitSBR_ChannelConfig0) {
uint8_t buffer[] = {0x13, 0x08};
std::vector<uint8_t> data;
data.assign(buffer, buffer + sizeof(buffer));
EXPECT_TRUE(Parse(data));
// Test w/o implicit SBR.
EXPECT_EQ(aac_.GetOutputSamplesPerSecond(false), 24000);
EXPECT_EQ(aac_.GetChannelLayout(false), CHANNEL_LAYOUT_MONO);
// Test implicit SBR.
EXPECT_EQ(aac_.GetOutputSamplesPerSecond(true), 48000);
EXPECT_EQ(aac_.GetChannelLayout(true), CHANNEL_LAYOUT_STEREO);
}
// Tests implicit SBR with a stereo channel config.
TEST_F(AACTest, ImplicitSBR_ChannelConfig1) {
uint8_t buffer[] = {0x13, 0x10};
std::vector<uint8_t> data;
data.assign(buffer, buffer + sizeof(buffer));
EXPECT_TRUE(Parse(data));
// Test w/o implicit SBR.
EXPECT_EQ(aac_.GetOutputSamplesPerSecond(false), 24000);
EXPECT_EQ(aac_.GetChannelLayout(false), CHANNEL_LAYOUT_STEREO);
// Test implicit SBR.
EXPECT_EQ(aac_.GetOutputSamplesPerSecond(true), 48000);
EXPECT_EQ(aac_.GetChannelLayout(true), CHANNEL_LAYOUT_STEREO);
}
TEST_F(AACTest, SixChannelTest) {
uint8_t buffer[] = {0x11, 0xb0};
std::vector<uint8_t> data;
data.assign(buffer, buffer + sizeof(buffer));
EXPECT_TRUE(Parse(data));
EXPECT_EQ(aac_.GetOutputSamplesPerSecond(false), 48000);
EXPECT_EQ(aac_.GetChannelLayout(false), CHANNEL_LAYOUT_5_1_BACK);
}
TEST_F(AACTest, DataTooShortTest) {
std::vector<uint8_t> data;
EXPECT_FALSE(Parse(data));
data.push_back(0x12);
EXPECT_FALSE(Parse(data));
}
TEST_F(AACTest, IncorrectProfileTest) {
InSequence s;
uint8_t buffer[] = {0x0, 0x08};
std::vector<uint8_t> data;
data.assign(buffer, buffer + sizeof(buffer));
EXPECT_MEDIA_LOG(UnsupportedAudioProfileLog("mp4a.40.0"));
EXPECT_FALSE(Parse(data));
data[0] = 0x08;
EXPECT_TRUE(Parse(data));
data[0] = 0x28;
// No media log for this profile 5, since not enough bits are in |data| to
// first parse profile 5's extension frequency index.
EXPECT_FALSE(Parse(data));
}
TEST_F(AACTest, IncorrectFrequencyTest) {
uint8_t buffer[] = {0x0f, 0x88};
std::vector<uint8_t> data;
data.assign(buffer, buffer + sizeof(buffer));
EXPECT_FALSE(Parse(data));
data[0] = 0x0e;
data[1] = 0x08;
EXPECT_TRUE(Parse(data));
}
TEST_F(AACTest, IncorrectChannelTest) {
uint8_t buffer[] = {0x0e, 0x00};
std::vector<uint8_t> data;
data.assign(buffer, buffer + sizeof(buffer));
EXPECT_FALSE(Parse(data));
data[1] = 0x08;
EXPECT_TRUE(Parse(data));
}
TEST_F(AACTest, UnsupportedProfileTest) {
InSequence s;
uint8_t buffer[] = {0x3a, 0x08};
std::vector<uint8_t> data;
data.assign(buffer, buffer + sizeof(buffer));
EXPECT_MEDIA_LOG(UnsupportedAudioProfileLog("mp4a.40.7"));
EXPECT_FALSE(Parse(data));
data[0] = 0x12;
data[1] = 0x18;
EXPECT_TRUE(Parse(data));
}
TEST_F(AACTest, UnsupportedChannelLayoutTest) {
InSequence s;
uint8_t buffer[] = {0x12, 0x78};
std::vector<uint8_t> data;
data.assign(buffer, buffer + sizeof(buffer));
EXPECT_MEDIA_LOG(UnsupportedChannelConfigLog("15"));
EXPECT_FALSE(Parse(data));
data[1] = 0x18;
EXPECT_TRUE(Parse(data));
}
TEST_F(AACTest, UnsupportedFrequencyIndexTest) {
InSequence s;
uint8_t buffer[] = {0x17, 0x10};
std::vector<uint8_t> data;
data.assign(buffer, buffer + sizeof(buffer));
EXPECT_MEDIA_LOG(UnsupportedFrequencyIndexLog("e"));
EXPECT_FALSE(Parse(data));
data[0] = 0x13;
EXPECT_TRUE(Parse(data));
}
TEST_F(AACTest, UnsupportedExFrequencyIndexTest) {
InSequence s;
uint8_t buffer[] = {0x29, 0x17, 0x08, 0x0};
std::vector<uint8_t> data;
data.assign(buffer, buffer + sizeof(buffer));
EXPECT_MEDIA_LOG(UnsupportedExtensionFrequencyIndexLog("e"));
EXPECT_FALSE(Parse(data));
data[1] = 0x11;
EXPECT_TRUE(Parse(data));
}
} // namespace mp4
} // namespace media
| null | null | null | null | 49,339 |
42,495 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 207,490 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/* linux/net/inet/arp.h */
#ifndef _ARP_H
#define _ARP_H
#include <linux/if_arp.h>
#include <linux/hash.h>
#include <net/neighbour.h>
extern struct neigh_table arp_tbl;
static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32 *hash_rnd)
{
u32 key = *(const u32 *)pkey;
u32 val = key ^ hash32_ptr(dev);
return val * hash_rnd[0];
}
static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
{
return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
}
static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
{
struct neighbour *n;
rcu_read_lock_bh();
n = __ipv4_neigh_lookup_noref(dev, key);
if (n && !atomic_inc_not_zero(&n->refcnt))
n = NULL;
rcu_read_unlock_bh();
return n;
}
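/* Sketch of the intended calling convention (caller side, not part of this
 * header): a non-NULL neighbour returned by __ipv4_neigh_lookup() carries a
 * reference that the caller is expected to drop with neigh_release() when
 * done, while the _noref variant may only be used under rcu_read_lock_bh().
 */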
static inline void __ipv4_confirm_neigh(struct net_device *dev, u32 key)
{
struct neighbour *n;
rcu_read_lock_bh();
n = __ipv4_neigh_lookup_noref(dev, key);
if (n) {
unsigned long now = jiffies;
/* avoid dirtying neighbour */
if (n->confirmed != now)
n->confirmed = now;
}
rcu_read_unlock_bh();
}
void arp_init(void);
int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg);
void arp_send(int type, int ptype, __be32 dest_ip,
struct net_device *dev, __be32 src_ip,
const unsigned char *dest_hw,
const unsigned char *src_hw, const unsigned char *th);
int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir);
void arp_ifdown(struct net_device *dev);
struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
struct net_device *dev, __be32 src_ip,
const unsigned char *dest_hw,
const unsigned char *src_hw,
const unsigned char *target_hw);
void arp_xmit(struct sk_buff *skb);
#endif /* _ARP_H */
| null | null | null | null | 115,837 |
29,116 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 194,111 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/*
* Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd.
* Copyright (C) 2015 Linaro Ltd.
* Author: Jassi Brar <jaswinder.singh@linaro.org>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/amba/bus.h>
#include <linux/mailbox_controller.h>
#define INTR_STAT_OFS 0x0
#define INTR_SET_OFS 0x8
#define INTR_CLR_OFS 0x10
#define MHU_LP_OFFSET 0x0
#define MHU_HP_OFFSET 0x20
#define MHU_SEC_OFFSET 0x200
#define TX_REG_OFFSET 0x100
#define MHU_CHANS 3
struct mhu_link {
unsigned irq;
void __iomem *tx_reg;
void __iomem *rx_reg;
};
struct arm_mhu {
void __iomem *base;
struct mhu_link mlink[MHU_CHANS];
struct mbox_chan chan[MHU_CHANS];
struct mbox_controller mbox;
};
static irqreturn_t mhu_rx_interrupt(int irq, void *p)
{
struct mbox_chan *chan = p;
struct mhu_link *mlink = chan->con_priv;
u32 val;
val = readl_relaxed(mlink->rx_reg + INTR_STAT_OFS);
if (!val)
return IRQ_NONE;
mbox_chan_received_data(chan, (void *)&val);
writel_relaxed(val, mlink->rx_reg + INTR_CLR_OFS);
return IRQ_HANDLED;
}
static bool mhu_last_tx_done(struct mbox_chan *chan)
{
struct mhu_link *mlink = chan->con_priv;
u32 val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
return (val == 0);
}
static int mhu_send_data(struct mbox_chan *chan, void *data)
{
struct mhu_link *mlink = chan->con_priv;
u32 *arg = data;
writel_relaxed(*arg, mlink->tx_reg + INTR_SET_OFS);
return 0;
}
static int mhu_startup(struct mbox_chan *chan)
{
struct mhu_link *mlink = chan->con_priv;
u32 val;
int ret;
val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
writel_relaxed(val, mlink->tx_reg + INTR_CLR_OFS);
ret = request_irq(mlink->irq, mhu_rx_interrupt,
IRQF_SHARED, "mhu_link", chan);
if (ret) {
dev_err(chan->mbox->dev,
"Unable to acquire IRQ %d\n", mlink->irq);
return ret;
}
return 0;
}
static void mhu_shutdown(struct mbox_chan *chan)
{
struct mhu_link *mlink = chan->con_priv;
free_irq(mlink->irq, chan);
}
static const struct mbox_chan_ops mhu_ops = {
.send_data = mhu_send_data,
.startup = mhu_startup,
.shutdown = mhu_shutdown,
.last_tx_done = mhu_last_tx_done,
};
static int mhu_probe(struct amba_device *adev, const struct amba_id *id)
{
int i, err;
struct arm_mhu *mhu;
struct device *dev = &adev->dev;
int mhu_reg[MHU_CHANS] = {MHU_LP_OFFSET, MHU_HP_OFFSET, MHU_SEC_OFFSET};
/* Allocate memory for device */
mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
if (!mhu)
return -ENOMEM;
mhu->base = devm_ioremap_resource(dev, &adev->res);
if (IS_ERR(mhu->base)) {
dev_err(dev, "ioremap failed\n");
return PTR_ERR(mhu->base);
}
for (i = 0; i < MHU_CHANS; i++) {
mhu->chan[i].con_priv = &mhu->mlink[i];
mhu->mlink[i].irq = adev->irq[i];
mhu->mlink[i].rx_reg = mhu->base + mhu_reg[i];
mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
}
mhu->mbox.dev = dev;
mhu->mbox.chans = &mhu->chan[0];
mhu->mbox.num_chans = MHU_CHANS;
mhu->mbox.ops = &mhu_ops;
mhu->mbox.txdone_irq = false;
mhu->mbox.txdone_poll = true;
mhu->mbox.txpoll_period = 1;
amba_set_drvdata(adev, mhu);
err = mbox_controller_register(&mhu->mbox);
if (err) {
dev_err(dev, "Failed to register mailboxes %d\n", err);
return err;
}
dev_info(dev, "ARM MHU Mailbox registered\n");
return 0;
}
static int mhu_remove(struct amba_device *adev)
{
struct arm_mhu *mhu = amba_get_drvdata(adev);
mbox_controller_unregister(&mhu->mbox);
return 0;
}
static struct amba_id mhu_ids[] = {
{
.id = 0x1bb098,
.mask = 0xffffff,
},
{ 0, 0 },
};
MODULE_DEVICE_TABLE(amba, mhu_ids);
static struct amba_driver arm_mhu_driver = {
.drv = {
.name = "mhu",
},
.id_table = mhu_ids,
.probe = mhu_probe,
.remove = mhu_remove,
};
module_amba_driver(arm_mhu_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ARM MHU Driver");
MODULE_AUTHOR("Jassi Brar <jassisinghbrar@gmail.com>");
| null | null | null | null | 102,458 |
18,843 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 183,838 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/*
* Adaptec AIC7xxx device driver for Linux.
*
* Copyright (c) 1994 John Aycock
* The University of Calgary Department of Computer Science.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Copyright (c) 2000-2003 Adaptec Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.h#151 $
*
*/
#ifndef _AIC7XXX_LINUX_H_
#define _AIC7XXX_LINUX_H_
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_spi.h>
/* Core SCSI definitions */
#define AIC_LIB_PREFIX ahc
#include "cam.h"
#include "queue.h"
#include "scsi_message.h"
#include "aiclib.h"
/*********************************** Debugging ********************************/
#ifdef CONFIG_AIC7XXX_DEBUG_ENABLE
#ifdef CONFIG_AIC7XXX_DEBUG_MASK
#define AHC_DEBUG 1
#define AHC_DEBUG_OPTS CONFIG_AIC7XXX_DEBUG_MASK
#else
/*
* Compile in debugging code, but do not enable any printfs.
*/
#define AHC_DEBUG 1
#endif
/* No debugging code. */
#endif
/************************* Forward Declarations *******************************/
struct ahc_softc;
typedef struct pci_dev *ahc_dev_softc_t;
typedef struct scsi_cmnd *ahc_io_ctx_t;
/******************************* Byte Order ***********************************/
#define ahc_htobe16(x) cpu_to_be16(x)
#define ahc_htobe32(x) cpu_to_be32(x)
#define ahc_htobe64(x) cpu_to_be64(x)
#define ahc_htole16(x) cpu_to_le16(x)
#define ahc_htole32(x) cpu_to_le32(x)
#define ahc_htole64(x) cpu_to_le64(x)
#define ahc_be16toh(x) be16_to_cpu(x)
#define ahc_be32toh(x) be32_to_cpu(x)
#define ahc_be64toh(x) be64_to_cpu(x)
#define ahc_le16toh(x) le16_to_cpu(x)
#define ahc_le32toh(x) le32_to_cpu(x)
#define ahc_le64toh(x) le64_to_cpu(x)
/************************* Configuration Data *********************************/
extern u_int aic7xxx_no_probe;
extern u_int aic7xxx_allow_memio;
extern struct scsi_host_template aic7xxx_driver_template;
/***************************** Bus Space/DMA **********************************/
typedef uint32_t bus_size_t;
typedef enum {
BUS_SPACE_MEMIO,
BUS_SPACE_PIO
} bus_space_tag_t;
typedef union {
u_long ioport;
volatile uint8_t __iomem *maddr;
} bus_space_handle_t;
typedef struct bus_dma_segment
{
dma_addr_t ds_addr;
bus_size_t ds_len;
} bus_dma_segment_t;
struct ahc_linux_dma_tag
{
bus_size_t alignment;
bus_size_t boundary;
bus_size_t maxsize;
};
typedef struct ahc_linux_dma_tag* bus_dma_tag_t;
typedef dma_addr_t bus_dmamap_t;
typedef int bus_dma_filter_t(void*, dma_addr_t);
typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
#define BUS_DMA_WAITOK 0x0
#define BUS_DMA_NOWAIT 0x1
#define BUS_DMA_ALLOCNOW 0x2
#define BUS_DMA_LOAD_SEGS 0x4 /*
* Argument is an S/G list not
* a single buffer.
*/
#define BUS_SPACE_MAXADDR 0xFFFFFFFF
#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFF
#define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFF
int ahc_dma_tag_create(struct ahc_softc *, bus_dma_tag_t /*parent*/,
bus_size_t /*alignment*/, bus_size_t /*boundary*/,
dma_addr_t /*lowaddr*/, dma_addr_t /*highaddr*/,
bus_dma_filter_t*/*filter*/, void */*filterarg*/,
bus_size_t /*maxsize*/, int /*nsegments*/,
bus_size_t /*maxsegsz*/, int /*flags*/,
bus_dma_tag_t */*dma_tagp*/);
void ahc_dma_tag_destroy(struct ahc_softc *, bus_dma_tag_t /*tag*/);
int ahc_dmamem_alloc(struct ahc_softc *, bus_dma_tag_t /*dmat*/,
void** /*vaddr*/, int /*flags*/,
bus_dmamap_t* /*mapp*/);
void ahc_dmamem_free(struct ahc_softc *, bus_dma_tag_t /*dmat*/,
void* /*vaddr*/, bus_dmamap_t /*map*/);
void ahc_dmamap_destroy(struct ahc_softc *, bus_dma_tag_t /*tag*/,
bus_dmamap_t /*map*/);
int ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t /*dmat*/,
bus_dmamap_t /*map*/, void * /*buf*/,
bus_size_t /*buflen*/, bus_dmamap_callback_t *,
void */*callback_arg*/, int /*flags*/);
int ahc_dmamap_unload(struct ahc_softc *, bus_dma_tag_t, bus_dmamap_t);
/*
* Operations performed by ahc_dmamap_sync().
*/
#define BUS_DMASYNC_PREREAD 0x01 /* pre-read synchronization */
#define BUS_DMASYNC_POSTREAD 0x02 /* post-read synchronization */
#define BUS_DMASYNC_PREWRITE 0x04 /* pre-write synchronization */
#define BUS_DMASYNC_POSTWRITE 0x08 /* post-write synchronization */
/*
* XXX
* ahc_dmamap_sync is only used on buffers allocated with
* the pci_alloc_consistent() API. Although I'm not sure how
* this works on architectures with a write buffer, Linux does
* not have an API to sync "coherent" memory. Perhaps we need
* to do an mb()?
*/
#define ahc_dmamap_sync(ahc, dma_tag, dmamap, offset, len, op)
/********************************** Includes **********************************/
#ifdef CONFIG_AIC7XXX_REG_PRETTY_PRINT
#define AIC_DEBUG_REGISTERS 1
#else
#define AIC_DEBUG_REGISTERS 0
#endif
#include "aic7xxx.h"
/***************************** Timer Facilities *******************************/
static inline void
ahc_scb_timer_reset(struct scb *scb, u_int usec)
{
}
/***************************** SMP support ************************************/
#include <linux/spinlock.h>
#define AIC7XXX_DRIVER_VERSION "7.0"
/*************************** Device Data Structures ***************************/
/*
* A per probed device structure used to deal with some error recovery
* scenarios that the Linux mid-layer code just doesn't know how to
* handle. The structure allocated for a device only becomes persistent
* after a successfully completed inquiry command to the target when
* that inquiry data indicates a lun is present.
*/
typedef enum {
AHC_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */
AHC_DEV_Q_BASIC = 0x10, /* Allow basic device queuing */
AHC_DEV_Q_TAGGED = 0x20, /* Allow full SCSI2 command queueing */
AHC_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */
} ahc_linux_dev_flags;
struct ahc_linux_device {
/*
* The number of transactions currently
* queued to the device.
*/
int active;
/*
* The currently allowed number of
* transactions that can be queued to
* the device. Must be signed for
* conversion from tagged to untagged
* mode where the device may have more
* than one outstanding active transaction.
*/
int openings;
/*
* A positive count indicates that this
* device's queue is halted.
*/
u_int qfrozen;
/*
* Cumulative command counter.
*/
u_long commands_issued;
/*
* The number of tagged transactions when
* running at our current opening level
* that have been successfully received by
* this device since the last QUEUE FULL.
*/
u_int tag_success_count;
#define AHC_TAG_SUCCESS_INTERVAL 50
ahc_linux_dev_flags flags;
/*
* The high limit for the tags variable.
*/
u_int maxtags;
/*
* The computed number of tags outstanding
* at the time of the last QUEUE FULL event.
*/
u_int tags_on_last_queuefull;
/*
* How many times we have seen a queue full
* with the same number of tags. This is used
* to stop our adaptive queue depth algorithm
* on devices with a fixed number of tags.
*/
u_int last_queuefull_same_count;
#define AHC_LOCK_TAGS_COUNT 50
/*
* How many transactions have been queued
* without the device going idle. We use
* this statistic to determine when to issue
* an ordered tag to prevent transaction
* starvation. This statistic is only updated
* if the AHC_DEV_PERIODIC_OTAG flag is set
* on this device.
*/
u_int commands_since_idle_or_otag;
#define AHC_OTAG_THRESH 500
};
/********************* Definitions Required by the Core ***********************/
/*
* Number of SG segments we require. So long as the S/G segments for
* a particular transaction are allocated in a physically contiguous
* manner and are allocated below 4GB, the number of S/G segments is
* unrestricted.
*/
#define AHC_NSEG 128
/*
* Per-SCB OSM storage.
*/
struct scb_platform_data {
struct ahc_linux_device *dev;
dma_addr_t buf_busaddr;
uint32_t xfer_len;
uint32_t sense_resid; /* Auto-Sense residual */
};
/*
* Define a structure used for each host adapter. All members are
* aligned on a boundary >= the size of the member to honor the
* alignment restrictions of the various platforms supported by
* this driver.
*/
struct ahc_platform_data {
/*
* Fields accessed from interrupt context.
*/
struct scsi_target *starget[AHC_NUM_TARGETS];
spinlock_t spin_lock;
u_int qfrozen;
struct completion *eh_done;
struct Scsi_Host *host; /* pointer to scsi host */
#define AHC_LINUX_NOIRQ ((uint32_t)~0)
uint32_t irq; /* IRQ for this adapter */
uint32_t bios_address;
resource_size_t mem_busaddr; /* Mem Base Addr */
};
void ahc_delay(long);
/***************************** Low Level I/O **********************************/
uint8_t ahc_inb(struct ahc_softc * ahc, long port);
void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val);
void ahc_outsb(struct ahc_softc * ahc, long port,
uint8_t *, int count);
void ahc_insb(struct ahc_softc * ahc, long port,
uint8_t *, int count);
/**************************** Initialization **********************************/
int ahc_linux_register_host(struct ahc_softc *,
struct scsi_host_template *);
/******************************** Locking *************************************/
/* Lock protecting internal data structures */
static inline void
ahc_lockinit(struct ahc_softc *ahc)
{
spin_lock_init(&ahc->platform_data->spin_lock);
}
static inline void
ahc_lock(struct ahc_softc *ahc, unsigned long *flags)
{
spin_lock_irqsave(&ahc->platform_data->spin_lock, *flags);
}
static inline void
ahc_unlock(struct ahc_softc *ahc, unsigned long *flags)
{
spin_unlock_irqrestore(&ahc->platform_data->spin_lock, *flags);
}
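/*
 * Illustrative usage sketch: code that touches controller state shared
 * with the interrupt handler brackets the access with these wrappers, e.g.
 *
 *	unsigned long flags;
 *
 *	ahc_lock(ahc, &flags);
 *	... manipulate ahc state ...
 *	ahc_unlock(ahc, &flags);
 */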
/******************************* PCI Definitions ******************************/
/*
* PCIM_xxx: mask to locate subfield in register
* PCIR_xxx: config register offset
* PCIC_xxx: device class
* PCIS_xxx: device subclass
* PCIP_xxx: device programming interface
* PCIV_xxx: PCI vendor ID (only required to fixup ancient devices)
* PCID_xxx: device ID
*/
#define PCIR_DEVVENDOR 0x00
#define PCIR_VENDOR 0x00
#define PCIR_DEVICE 0x02
#define PCIR_COMMAND 0x04
#define PCIM_CMD_PORTEN 0x0001
#define PCIM_CMD_MEMEN 0x0002
#define PCIM_CMD_BUSMASTEREN 0x0004
#define PCIM_CMD_MWRICEN 0x0010
#define PCIM_CMD_PERRESPEN 0x0040
#define PCIM_CMD_SERRESPEN 0x0100
#define PCIR_STATUS 0x06
#define PCIR_REVID 0x08
#define PCIR_PROGIF 0x09
#define PCIR_SUBCLASS 0x0a
#define PCIR_CLASS 0x0b
#define PCIR_CACHELNSZ 0x0c
#define PCIR_LATTIMER 0x0d
#define PCIR_HEADERTYPE 0x0e
#define PCIM_MFDEV 0x80
#define PCIR_BIST 0x0f
#define PCIR_CAP_PTR 0x34
/* config registers for header type 0 devices */
#define PCIR_MAPS 0x10
#define PCIR_SUBVEND_0 0x2c
#define PCIR_SUBDEV_0 0x2e
typedef enum
{
AHC_POWER_STATE_D0,
AHC_POWER_STATE_D1,
AHC_POWER_STATE_D2,
AHC_POWER_STATE_D3
} ahc_power_state;
/**************************** VL/EISA Routines ********************************/
#ifdef CONFIG_EISA
int ahc_linux_eisa_init(void);
void ahc_linux_eisa_exit(void);
int aic7770_map_registers(struct ahc_softc *ahc,
u_int port);
int aic7770_map_int(struct ahc_softc *ahc, u_int irq);
#else
static inline int ahc_linux_eisa_init(void) {
return -ENODEV;
}
static inline void ahc_linux_eisa_exit(void) {
}
#endif
/******************************* PCI Routines *********************************/
#ifdef CONFIG_PCI
int ahc_linux_pci_init(void);
void ahc_linux_pci_exit(void);
int ahc_pci_map_registers(struct ahc_softc *ahc);
int ahc_pci_map_int(struct ahc_softc *ahc);
uint32_t ahc_pci_read_config(ahc_dev_softc_t pci,
int reg, int width);
void ahc_pci_write_config(ahc_dev_softc_t pci,
int reg, uint32_t value,
int width);
static inline int ahc_get_pci_function(ahc_dev_softc_t);
static inline int
ahc_get_pci_function(ahc_dev_softc_t pci)
{
return (PCI_FUNC(pci->devfn));
}
static inline int ahc_get_pci_slot(ahc_dev_softc_t);
static inline int
ahc_get_pci_slot(ahc_dev_softc_t pci)
{
return (PCI_SLOT(pci->devfn));
}
static inline int ahc_get_pci_bus(ahc_dev_softc_t);
static inline int
ahc_get_pci_bus(ahc_dev_softc_t pci)
{
return (pci->bus->number);
}
#else
static inline int ahc_linux_pci_init(void) {
return 0;
}
static inline void ahc_linux_pci_exit(void) {
}
#endif
static inline void ahc_flush_device_writes(struct ahc_softc *);
static inline void
ahc_flush_device_writes(struct ahc_softc *ahc)
{
/* XXX Is this sufficient for all architectures??? */
ahc_inb(ahc, INTSTAT);
}
/**************************** Proc FS Support *********************************/
int ahc_proc_write_seeprom(struct Scsi_Host *, char *, int);
int ahc_linux_show_info(struct seq_file *, struct Scsi_Host *);
/*************************** Domain Validation ********************************/
/*********************** Transaction Access Wrappers *************************/
static inline void ahc_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t);
static inline void ahc_set_transaction_status(struct scb *, uint32_t);
static inline void ahc_cmd_set_scsi_status(struct scsi_cmnd *, uint32_t);
static inline void ahc_set_scsi_status(struct scb *, uint32_t);
static inline uint32_t ahc_cmd_get_transaction_status(struct scsi_cmnd *cmd);
static inline uint32_t ahc_get_transaction_status(struct scb *);
static inline uint32_t ahc_cmd_get_scsi_status(struct scsi_cmnd *cmd);
static inline uint32_t ahc_get_scsi_status(struct scb *);
static inline void ahc_set_transaction_tag(struct scb *, int, u_int);
static inline u_long ahc_get_transfer_length(struct scb *);
static inline int ahc_get_transfer_dir(struct scb *);
static inline void ahc_set_residual(struct scb *, u_long);
static inline void ahc_set_sense_residual(struct scb *scb, u_long resid);
static inline u_long ahc_get_residual(struct scb *);
static inline u_long ahc_get_sense_residual(struct scb *);
static inline int ahc_perform_autosense(struct scb *);
static inline uint32_t ahc_get_sense_bufsize(struct ahc_softc *,
struct scb *);
static inline void ahc_notify_xfer_settings_change(struct ahc_softc *,
struct ahc_devinfo *);
static inline void ahc_platform_scb_free(struct ahc_softc *ahc,
struct scb *scb);
static inline void ahc_freeze_scb(struct scb *scb);
static inline
void ahc_cmd_set_transaction_status(struct scsi_cmnd *cmd, uint32_t status)
{
cmd->result &= ~(CAM_STATUS_MASK << 16);
cmd->result |= status << 16;
}
static inline
void ahc_set_transaction_status(struct scb *scb, uint32_t status)
{
ahc_cmd_set_transaction_status(scb->io_ctx,status);
}
static inline
void ahc_cmd_set_scsi_status(struct scsi_cmnd *cmd, uint32_t status)
{
cmd->result &= ~0xFFFF;
cmd->result |= status;
}
static inline
void ahc_set_scsi_status(struct scb *scb, uint32_t status)
{
ahc_cmd_set_scsi_status(scb->io_ctx, status);
}
static inline
uint32_t ahc_cmd_get_transaction_status(struct scsi_cmnd *cmd)
{
return ((cmd->result >> 16) & CAM_STATUS_MASK);
}
static inline
uint32_t ahc_get_transaction_status(struct scb *scb)
{
return (ahc_cmd_get_transaction_status(scb->io_ctx));
}
static inline
uint32_t ahc_cmd_get_scsi_status(struct scsi_cmnd *cmd)
{
return (cmd->result & 0xFFFF);
}
static inline
uint32_t ahc_get_scsi_status(struct scb *scb)
{
return (ahc_cmd_get_scsi_status(scb->io_ctx));
}
static inline
void ahc_set_transaction_tag(struct scb *scb, int enabled, u_int type)
{
/*
 * Nothing to do for Linux, as the incoming transaction
 * has no concept of tagged vs. untagged queuing, etc.
*/
}
static inline
u_long ahc_get_transfer_length(struct scb *scb)
{
return (scb->platform_data->xfer_len);
}
static inline
int ahc_get_transfer_dir(struct scb *scb)
{
return (scb->io_ctx->sc_data_direction);
}
static inline
void ahc_set_residual(struct scb *scb, u_long resid)
{
scsi_set_resid(scb->io_ctx, resid);
}
static inline
void ahc_set_sense_residual(struct scb *scb, u_long resid)
{
scb->platform_data->sense_resid = resid;
}
static inline
u_long ahc_get_residual(struct scb *scb)
{
return scsi_get_resid(scb->io_ctx);
}
static inline
u_long ahc_get_sense_residual(struct scb *scb)
{
return (scb->platform_data->sense_resid);
}
static inline
int ahc_perform_autosense(struct scb *scb)
{
/*
* We always perform autosense in Linux.
* On other platforms this is set on a
* per-transaction basis.
*/
return (1);
}
static inline uint32_t
ahc_get_sense_bufsize(struct ahc_softc *ahc, struct scb *scb)
{
return (sizeof(struct scsi_sense_data));
}
static inline void
ahc_notify_xfer_settings_change(struct ahc_softc *ahc,
struct ahc_devinfo *devinfo)
{
/* Nothing to do here for linux */
}
static inline void
ahc_platform_scb_free(struct ahc_softc *ahc, struct scb *scb)
{
}
int ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg);
void ahc_platform_free(struct ahc_softc *ahc);
void ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb);
static inline void
ahc_freeze_scb(struct scb *scb)
{
if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
scb->platform_data->dev->qfrozen++;
}
}
void ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
struct ahc_devinfo *devinfo, ahc_queue_alg);
int ahc_platform_abort_scbs(struct ahc_softc *ahc, int target,
char channel, int lun, u_int tag,
role_t role, uint32_t status);
irqreturn_t
ahc_linux_isr(int irq, void *dev_id);
void ahc_platform_flushwork(struct ahc_softc *ahc);
void ahc_done(struct ahc_softc*, struct scb*);
void ahc_send_async(struct ahc_softc *, char channel,
u_int target, u_int lun, ac_code);
void ahc_print_path(struct ahc_softc *, struct scb *);
void ahc_platform_dump_card_state(struct ahc_softc *ahc);
#ifdef CONFIG_PCI
#define AHC_PCI_CONFIG 1
#else
#define AHC_PCI_CONFIG 0
#endif
#define bootverbose aic7xxx_verbose
extern u_int aic7xxx_verbose;
#endif /* _AIC7XXX_LINUX_H_ */
| null | null | null | null | 92,185 |
28,695 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 28,695 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
/* pngwutil.c - utilities to write a PNG file
*
* Last changed in libpng 1.6.32 [August 24, 2017]
* Copyright (c) 1998-2002,2004,2006-2017 Glenn Randers-Pehrson
* (Version 0.96 Copyright (c) 1996, 1997 Andreas Dilger)
* (Version 0.88 Copyright (c) 1995, 1996 Guy Eric Schalnat, Group 42, Inc.)
*
* This code is released under the libpng license.
* For conditions of distribution and use, see the disclaimer
* and license in png.h
*/
#include "pngpriv.h"
#ifdef PNG_WRITE_SUPPORTED
#ifdef PNG_WRITE_INT_FUNCTIONS_SUPPORTED
/* Place a 32-bit number into a buffer in PNG byte order. We work
* with unsigned numbers for convenience, although one supported
* ancillary chunk uses signed (two's complement) numbers.
*/
void PNGAPI
png_save_uint_32(png_bytep buf, png_uint_32 i)
{
buf[0] = (png_byte)((i >> 24) & 0xffU);
buf[1] = (png_byte)((i >> 16) & 0xffU);
buf[2] = (png_byte)((i >> 8) & 0xffU);
buf[3] = (png_byte)( i & 0xffU);
}
/* Place a 16-bit number into a buffer in PNG byte order.
* The parameter is declared unsigned int, not png_uint_16,
* just to avoid potential problems on pre-ANSI C compilers.
*/
void PNGAPI
png_save_uint_16(png_bytep buf, unsigned int i)
{
buf[0] = (png_byte)((i >> 8) & 0xffU);
buf[1] = (png_byte)( i & 0xffU);
}
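/* Worked example (informational only): png_save_uint_32(buf, 0x0A0B0C0DU)
 * stores 0x0A, 0x0B, 0x0C, 0x0D in buf[0]..buf[3], i.e. most significant
 * byte first, which is the (big endian) network byte order PNG requires.
 */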
#endif
/* Simple function to write the signature. If we have already written
* the magic bytes of the signature, or more likely, the PNG stream is
* being embedded into another stream and doesn't need its own signature,
* we should call png_set_sig_bytes() to tell libpng how many of the
* bytes have already been written.
*/
void PNGAPI
png_write_sig(png_structrp png_ptr)
{
png_byte png_signature[8] = {137, 80, 78, 71, 13, 10, 26, 10};
#ifdef PNG_IO_STATE_SUPPORTED
/* Inform the I/O callback that the signature is being written */
png_ptr->io_state = PNG_IO_WRITING | PNG_IO_SIGNATURE;
#endif
/* Write the rest of the 8 byte signature */
png_write_data(png_ptr, &png_signature[png_ptr->sig_bytes],
(png_size_t)(8 - png_ptr->sig_bytes));
if (png_ptr->sig_bytes < 3)
png_ptr->mode |= PNG_HAVE_PNG_SIGNATURE;
}
/* Write the start of a PNG chunk. The type is the chunk type.
* The total_length is the sum of the lengths of all the data you will be
* passing in png_write_chunk_data().
*/
static void
png_write_chunk_header(png_structrp png_ptr, png_uint_32 chunk_name,
png_uint_32 length)
{
png_byte buf[8];
#if defined(PNG_DEBUG) && (PNG_DEBUG > 0)
PNG_CSTRING_FROM_CHUNK(buf, chunk_name);
png_debug2(0, "Writing %s chunk, length = %lu", buf, (unsigned long)length);
#endif
if (png_ptr == NULL)
return;
#ifdef PNG_IO_STATE_SUPPORTED
/* Inform the I/O callback that the chunk header is being written.
* PNG_IO_CHUNK_HDR requires a single I/O call.
*/
png_ptr->io_state = PNG_IO_WRITING | PNG_IO_CHUNK_HDR;
#endif
/* Write the length and the chunk name */
png_save_uint_32(buf, length);
png_save_uint_32(buf + 4, chunk_name);
png_write_data(png_ptr, buf, 8);
/* Put the chunk name into png_ptr->chunk_name */
png_ptr->chunk_name = chunk_name;
/* Reset the crc and run it over the chunk name */
png_reset_crc(png_ptr);
png_calculate_crc(png_ptr, buf + 4, 4);
#ifdef PNG_IO_STATE_SUPPORTED
/* Inform the I/O callback that chunk data will (possibly) be written.
* PNG_IO_CHUNK_DATA does NOT require a specific number of I/O calls.
*/
png_ptr->io_state = PNG_IO_WRITING | PNG_IO_CHUNK_DATA;
#endif
}
void PNGAPI
png_write_chunk_start(png_structrp png_ptr, png_const_bytep chunk_string,
png_uint_32 length)
{
png_write_chunk_header(png_ptr, PNG_CHUNK_FROM_STRING(chunk_string), length);
}
/* Write the data of a PNG chunk started with png_write_chunk_header().
* Note that multiple calls to this function are allowed, and that the
* sum of the lengths from these calls *must* add up to the total_length
* given to png_write_chunk_header().
*/
void PNGAPI
png_write_chunk_data(png_structrp png_ptr, png_const_bytep data,
png_size_t length)
{
/* Write the data, and run the CRC over it */
if (png_ptr == NULL)
return;
if (data != NULL && length > 0)
{
png_write_data(png_ptr, data, length);
/* Update the CRC after writing the data,
* in case the user I/O routine alters it.
*/
png_calculate_crc(png_ptr, data, length);
}
}
/* Finish a chunk started with png_write_chunk_header(). */
void PNGAPI
png_write_chunk_end(png_structrp png_ptr)
{
png_byte buf[4];
if (png_ptr == NULL) return;
#ifdef PNG_IO_STATE_SUPPORTED
/* Inform the I/O callback that the chunk CRC is being written.
* PNG_IO_CHUNK_CRC requires a single I/O function call.
*/
png_ptr->io_state = PNG_IO_WRITING | PNG_IO_CHUNK_CRC;
#endif
/* Write the crc in a single operation */
png_save_uint_32(buf, png_ptr->crc);
png_write_data(png_ptr, buf, (png_size_t)4);
}
/* Write a PNG chunk all at once. The type is an array of ASCII characters
* representing the chunk name. The array must be at least 4 bytes in
* length, and does not need to be null terminated. To be safe, pass the
* pre-defined chunk names here, and if you need a new one, define it
* where the others are defined. The length is the length of the data.
* All the data must be present. If that is not possible, use the
* png_write_chunk_start(), png_write_chunk_data(), and png_write_chunk_end()
* functions instead.
*/
static void
png_write_complete_chunk(png_structrp png_ptr, png_uint_32 chunk_name,
png_const_bytep data, png_size_t length)
{
if (png_ptr == NULL)
return;
/* On 64-bit architectures 'length' may not fit in a png_uint_32. */
if (length > PNG_UINT_31_MAX)
png_error(png_ptr, "length exceeds PNG maximum");
png_write_chunk_header(png_ptr, chunk_name, (png_uint_32)length);
png_write_chunk_data(png_ptr, data, length);
png_write_chunk_end(png_ptr);
}
/* This is the API that calls the internal function above. */
void PNGAPI
png_write_chunk(png_structrp png_ptr, png_const_bytep chunk_string,
png_const_bytep data, png_size_t length)
{
png_write_complete_chunk(png_ptr, PNG_CHUNK_FROM_STRING(chunk_string), data,
length);
}
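/* Illustrative example only: writing a chunk in pieces,
 *
 *    png_write_chunk_start(png_ptr, (png_const_bytep)"tEXt", 11);
 *    png_write_chunk_data(png_ptr, (png_const_bytep)"Title", 6);
 *    png_write_chunk_data(png_ptr, (png_const_bytep)"hello", 5);
 *    png_write_chunk_end(png_ptr);
 *
 * produces exactly the same output as a single png_write_chunk() call with
 * the same eleven bytes of data, provided the data lengths written add up
 * to the length passed to png_write_chunk_start().  (The 6 bytes written
 * from "Title" include its terminating NUL, which doubles as the tEXt
 * keyword separator.)
 */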
/* This is used below to find the size of an image to pass to png_deflate_claim,
* so it only needs to be accurate if the size is less than 16384 bytes (the
* point at which a lower LZ window size can be used.)
*/
static png_alloc_size_t
png_image_size(png_structrp png_ptr)
{
/* Only return sizes up to the maximum of a png_uint_32; do this by limiting
* the width and height used to 15 bits.
*/
png_uint_32 h = png_ptr->height;
if (png_ptr->rowbytes < 32768 && h < 32768)
{
if (png_ptr->interlaced != 0)
{
/* Interlacing makes the image larger because of the replication of
* both the filter byte and the padding to a byte boundary.
*/
png_uint_32 w = png_ptr->width;
unsigned int pd = png_ptr->pixel_depth;
png_alloc_size_t cb_base;
int pass;
for (cb_base=0, pass=0; pass<=6; ++pass)
{
png_uint_32 pw = PNG_PASS_COLS(w, pass);
if (pw > 0)
cb_base += (PNG_ROWBYTES(pd, pw)+1) * PNG_PASS_ROWS(h, pass);
}
return cb_base;
}
else
return (png_ptr->rowbytes+1) * h;
}
else
return 0xffffffffU;
}
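/* Worked example (informational only): for a non-interlaced 8-bit RGB image
 * 100 pixels wide and 100 rows high, rowbytes is 300 and the function above
 * returns (300 + 1) * 100 = 30100; the extra byte per row is the filter
 * byte that precedes each row in the IDAT stream.
 */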
#ifdef PNG_WRITE_OPTIMIZE_CMF_SUPPORTED
/* This is the code to hack the first two bytes of the deflate stream (the
* deflate header) to correct the windowBits value to match the actual data
* size. Note that the second argument is the *uncompressed* size but the
* first argument is the *compressed* data (and it must be deflate
* compressed.)
*/
static void
optimize_cmf(png_bytep data, png_alloc_size_t data_size)
{
/* Optimize the CMF field in the zlib stream. The resultant zlib stream is
* still compliant to the stream specification.
*/
if (data_size <= 16384) /* else windowBits must be 15 */
{
unsigned int z_cmf = data[0]; /* zlib compression method and flags */
if ((z_cmf & 0x0f) == 8 && (z_cmf & 0xf0) <= 0x70)
{
unsigned int z_cinfo;
unsigned int half_z_window_size;
z_cinfo = z_cmf >> 4;
half_z_window_size = 1U << (z_cinfo + 7);
if (data_size <= half_z_window_size) /* else no change */
{
unsigned int tmp;
do
{
half_z_window_size >>= 1;
--z_cinfo;
}
while (z_cinfo > 0 && data_size <= half_z_window_size);
z_cmf = (z_cmf & 0x0f) | (z_cinfo << 4);
data[0] = (png_byte)z_cmf;
tmp = data[1] & 0xe0;
tmp += 0x1f - ((z_cmf << 8) + tmp) % 0x1f;
data[1] = (png_byte)tmp;
}
}
}
}
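/* Worked example (informational only): with the common deflate header
 * 0x78 0x9C (windowBits 15) and data_size 1000 the loop above stops at
 * z_cinfo 2, so CMF becomes 0x28 (a 1024 byte window), and FLG is rebuilt
 * so that CMF*256+FLG stays a multiple of 31: 0x9C becomes 0x91, and
 * 0x28*256 + 0x91 = 10385 = 31*335.
 */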
#endif /* WRITE_OPTIMIZE_CMF */
/* Initialize the compressor for the appropriate type of compression. */
static int
png_deflate_claim(png_structrp png_ptr, png_uint_32 owner,
png_alloc_size_t data_size)
{
if (png_ptr->zowner != 0)
{
#if defined(PNG_WARNINGS_SUPPORTED) || defined(PNG_ERROR_TEXT_SUPPORTED)
char msg[64];
PNG_STRING_FROM_CHUNK(msg, owner);
msg[4] = ':';
msg[5] = ' ';
PNG_STRING_FROM_CHUNK(msg+6, png_ptr->zowner);
/* So the message that results is "<chunk> using zstream"; this is an
* internal error, but is very useful for debugging. i18n requirements
* are minimal.
*/
(void)png_safecat(msg, (sizeof msg), 10, " using zstream");
#endif
#if PNG_RELEASE_BUILD
png_warning(png_ptr, msg);
/* Attempt sane error recovery */
if (png_ptr->zowner == png_IDAT) /* don't steal from IDAT */
{
png_ptr->zstream.msg = PNGZ_MSG_CAST("in use by IDAT");
return Z_STREAM_ERROR;
}
png_ptr->zowner = 0;
#else
png_error(png_ptr, msg);
#endif
}
{
int level = png_ptr->zlib_level;
int method = png_ptr->zlib_method;
int windowBits = png_ptr->zlib_window_bits;
int memLevel = png_ptr->zlib_mem_level;
int strategy; /* set below */
int ret; /* zlib return code */
if (owner == png_IDAT)
{
if ((png_ptr->flags & PNG_FLAG_ZLIB_CUSTOM_STRATEGY) != 0)
strategy = png_ptr->zlib_strategy;
else if (png_ptr->do_filter != PNG_FILTER_NONE)
strategy = PNG_Z_DEFAULT_STRATEGY;
else
strategy = PNG_Z_DEFAULT_NOFILTER_STRATEGY;
}
else
{
#ifdef PNG_WRITE_CUSTOMIZE_ZTXT_COMPRESSION_SUPPORTED
level = png_ptr->zlib_text_level;
method = png_ptr->zlib_text_method;
windowBits = png_ptr->zlib_text_window_bits;
memLevel = png_ptr->zlib_text_mem_level;
strategy = png_ptr->zlib_text_strategy;
#else
/* If customization is not supported the values all come from the
* IDAT values except for the strategy, which is fixed to the
* default. (This is the pre-1.6.0 behavior too, although it was
* implemented in a very different way.)
*/
strategy = Z_DEFAULT_STRATEGY;
#endif
}
/* Adjust 'windowBits' down if larger than 'data_size'; to stop this
* happening just pass 32768 as the data_size parameter. Notice that zlib
* requires an extra 262 bytes in the window in addition to the data to be
* able to see the whole of the data, so if data_size+262 takes us to the
* next windowBits size we need to fix up the value later. (Because even
* though deflate needs the extra window, inflate does not!)
*/
if (data_size <= 16384)
{
/* IMPLEMENTATION NOTE: this 'half_window_size' stuff is only here to
* work round a Microsoft Visual C misbehavior which, contrary to C-90,
* widens the result of the following shift to 64-bits if (and,
* apparently, only if) it is used in a test.
*/
unsigned int half_window_size = 1U << (windowBits-1);
while (data_size + 262 <= half_window_size)
{
half_window_size >>= 1;
--windowBits;
}
}
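/* Worked example (informational only): for data_size 1000 and an initial
 * windowBits of 15 the loop above stops at windowBits 11 (a 2048 byte
 * window), because deflate needs data_size + 262 = 1262 bytes of window;
 * optimize_cmf() can later shrink the value recorded in the stream to a
 * 1024 byte window because inflate does not need the extra 262 bytes.
 */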
/* Check against the previous initialized values, if any. */
if ((png_ptr->flags & PNG_FLAG_ZSTREAM_INITIALIZED) != 0 &&
(png_ptr->zlib_set_level != level ||
png_ptr->zlib_set_method != method ||
png_ptr->zlib_set_window_bits != windowBits ||
png_ptr->zlib_set_mem_level != memLevel ||
png_ptr->zlib_set_strategy != strategy))
{
if (deflateEnd(&png_ptr->zstream) != Z_OK)
png_warning(png_ptr, "deflateEnd failed (ignored)");
png_ptr->flags &= ~PNG_FLAG_ZSTREAM_INITIALIZED;
}
/* For safety clear out the input and output pointers (currently zlib
* doesn't use them on Init, but it might in the future).
*/
png_ptr->zstream.next_in = NULL;
png_ptr->zstream.avail_in = 0;
png_ptr->zstream.next_out = NULL;
png_ptr->zstream.avail_out = 0;
/* Now initialize if required, setting the new parameters, otherwise just
* do a simple reset to the previous parameters.
*/
if ((png_ptr->flags & PNG_FLAG_ZSTREAM_INITIALIZED) != 0)
ret = deflateReset(&png_ptr->zstream);
else
{
ret = deflateInit2(&png_ptr->zstream, level, method, windowBits,
memLevel, strategy);
if (ret == Z_OK)
png_ptr->flags |= PNG_FLAG_ZSTREAM_INITIALIZED;
}
/* The return code is from either deflateReset or deflateInit2; they have
* pretty much the same set of error codes.
*/
if (ret == Z_OK)
png_ptr->zowner = owner;
else
png_zstream_error(png_ptr, ret);
return ret;
}
}
/* Clean up (or trim) a linked list of compression buffers. */
void /* PRIVATE */
png_free_buffer_list(png_structrp png_ptr, png_compression_bufferp *listp)
{
png_compression_bufferp list = *listp;
if (list != NULL)
{
*listp = NULL;
do
{
png_compression_bufferp next = list->next;
png_free(png_ptr, list);
list = next;
}
while (list != NULL);
}
}
#ifdef PNG_WRITE_COMPRESSED_TEXT_SUPPORTED
/* This pair of functions encapsulates the operation of (a) compressing a
* text string, and (b) issuing it later as a series of chunk data writes.
* The compression_state structure is shared context for these functions
* set up by the caller to allow access to the relevant local variables.
*
* compression_buffer (new in 1.6.0) is just a linked list of zbuffer_size
* temporary buffers. From 1.6.0 it is retained in png_struct so that it will
* be correctly freed in the event of a write error (previous implementations
* just leaked memory.)
*/
typedef struct
{
png_const_bytep input; /* The uncompressed input data */
png_alloc_size_t input_len; /* Its length */
png_uint_32 output_len; /* Final compressed length */
png_byte output[1024]; /* First block of output */
} compression_state;
static void
png_text_compress_init(compression_state *comp, png_const_bytep input,
png_alloc_size_t input_len)
{
comp->input = input;
comp->input_len = input_len;
comp->output_len = 0;
}
/* Compress the data in the compression state input */
static int
png_text_compress(png_structrp png_ptr, png_uint_32 chunk_name,
compression_state *comp, png_uint_32 prefix_len)
{
int ret;
/* To find the length of the output it is necessary to first compress the
* input. The result is buffered rather than using the two-pass algorithm
* that is used on the inflate side; deflate is assumed to be slower and a
* PNG writer is assumed to have more memory available than a PNG reader.
*
* IMPLEMENTATION NOTE: the zlib API deflateBound() can be used to find an
* upper limit on the output size, but it is always bigger than the input
* size so it is likely to be more efficient to use this linked-list
* approach.
*/
ret = png_deflate_claim(png_ptr, chunk_name, comp->input_len);
if (ret != Z_OK)
return ret;
/* Set up the compression buffers; we need a loop here to avoid overflowing a
* uInt. Use ZLIB_IO_MAX to limit the input. The output is always limited
* by the output buffer size, so there is no need to check that. Since this
* is ANSI-C we know that an 'int', hence a uInt, is always at least 16 bits
* in size.
*/
{
png_compression_bufferp *end = &png_ptr->zbuffer_list;
png_alloc_size_t input_len = comp->input_len; /* may be zero! */
png_uint_32 output_len;
/* zlib updates these for us: */
png_ptr->zstream.next_in = PNGZ_INPUT_CAST(comp->input);
png_ptr->zstream.avail_in = 0; /* Set below */
png_ptr->zstream.next_out = comp->output;
png_ptr->zstream.avail_out = (sizeof comp->output);
output_len = png_ptr->zstream.avail_out;
do
{
uInt avail_in = ZLIB_IO_MAX;
if (avail_in > input_len)
avail_in = (uInt)input_len;
input_len -= avail_in;
png_ptr->zstream.avail_in = avail_in;
if (png_ptr->zstream.avail_out == 0)
{
png_compression_buffer *next;
/* Chunk data is limited to 2^31 bytes in length, so the prefix
* length must be counted here.
*/
if (output_len + prefix_len > PNG_UINT_31_MAX)
{
ret = Z_MEM_ERROR;
break;
}
/* Need a new (malloc'ed) buffer, but there may be one present
* already.
*/
next = *end;
if (next == NULL)
{
next = png_voidcast(png_compression_bufferp, png_malloc_base
(png_ptr, PNG_COMPRESSION_BUFFER_SIZE(png_ptr)));
if (next == NULL)
{
ret = Z_MEM_ERROR;
break;
}
/* Link in this buffer (so that it will be freed later) */
next->next = NULL;
*end = next;
}
png_ptr->zstream.next_out = next->output;
png_ptr->zstream.avail_out = png_ptr->zbuffer_size;
output_len += png_ptr->zstream.avail_out;
/* Move 'end' to the next buffer pointer. */
end = &next->next;
}
/* Compress the data */
ret = deflate(&png_ptr->zstream,
input_len > 0 ? Z_NO_FLUSH : Z_FINISH);
/* Claw back input data that was not consumed (because avail_in is
* reset above every time round the loop).
*/
input_len += png_ptr->zstream.avail_in;
png_ptr->zstream.avail_in = 0; /* safety */
}
while (ret == Z_OK);
/* There may be some space left in the last output buffer. This needs to
* be subtracted from output_len.
*/
output_len -= png_ptr->zstream.avail_out;
png_ptr->zstream.avail_out = 0; /* safety */
comp->output_len = output_len;
/* Now double check the output length, put in a custom message if it is
* too long. Otherwise ensure the z_stream::msg pointer is set to
* something.
*/
if (output_len + prefix_len >= PNG_UINT_31_MAX)
{
png_ptr->zstream.msg = PNGZ_MSG_CAST("compressed data too long");
ret = Z_MEM_ERROR;
}
else
png_zstream_error(png_ptr, ret);
/* Reset zlib for another zTXt/iTXt or image data */
png_ptr->zowner = 0;
/* The only success case is Z_STREAM_END, input_len must be 0; if not this
* is an internal error.
*/
if (ret == Z_STREAM_END && input_len == 0)
{
#ifdef PNG_WRITE_OPTIMIZE_CMF_SUPPORTED
/* Fix up the deflate header, if required */
optimize_cmf(comp->output, comp->input_len);
#endif
/* But Z_OK is returned, not Z_STREAM_END; this allows the claim
* function above to return Z_STREAM_END on an error (though it never
* does in the current versions of zlib.)
*/
return Z_OK;
}
else
return ret;
}
}
/* Ship the compressed text out via chunk writes */
static void
png_write_compressed_data_out(png_structrp png_ptr, compression_state *comp)
{
png_uint_32 output_len = comp->output_len;
png_const_bytep output = comp->output;
png_uint_32 avail = (sizeof comp->output);
png_compression_buffer *next = png_ptr->zbuffer_list;
for (;;)
{
if (avail > output_len)
avail = output_len;
png_write_chunk_data(png_ptr, output, avail);
output_len -= avail;
if (output_len == 0 || next == NULL)
break;
avail = png_ptr->zbuffer_size;
output = next->output;
next = next->next;
}
/* This is an internal error; 'next' must have been NULL! */
if (output_len > 0)
png_error(png_ptr, "error writing ancillary chunked compressed data");
}
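/* Illustrative only: the calling sequence used by the ancillary chunk
 * writers later in this file is
 *
 *    png_text_compress_init(&comp, input, input_len);
 *    if (png_text_compress(png_ptr, chunk_name, &comp, prefix_len) != Z_OK)
 *       png_error(png_ptr, png_ptr->zstream.msg);
 *    png_write_chunk_header(png_ptr, chunk_name, prefix_len + comp.output_len);
 *    png_write_chunk_data(png_ptr, prefix, prefix_len);
 *    png_write_compressed_data_out(png_ptr, &comp);
 *    png_write_chunk_end(png_ptr);
 *
 * where 'prefix' is the uncompressed keyword/header data (prefix_len bytes)
 * that precedes the compressed payload inside the chunk.
 */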
#endif /* WRITE_COMPRESSED_TEXT */
/* Write the IHDR chunk, and update the png_struct with the necessary
* information. Note that the rest of this code depends upon this
* information being correct.
*/
void /* PRIVATE */
png_write_IHDR(png_structrp png_ptr, png_uint_32 width, png_uint_32 height,
int bit_depth, int color_type, int compression_type, int filter_type,
int interlace_type)
{
png_byte buf[13]; /* Buffer to store the IHDR info */
int is_invalid_depth;
png_debug(1, "in png_write_IHDR");
/* Check that we have valid input data from the application info */
switch (color_type)
{
case PNG_COLOR_TYPE_GRAY:
switch (bit_depth)
{
case 1:
case 2:
case 4:
case 8:
#ifdef PNG_WRITE_16BIT_SUPPORTED
case 16:
#endif
png_ptr->channels = 1; break;
default:
png_error(png_ptr,
"Invalid bit depth for grayscale image");
}
break;
case PNG_COLOR_TYPE_RGB:
is_invalid_depth = (bit_depth != 8);
#ifdef PNG_WRITE_16BIT_SUPPORTED
is_invalid_depth = (is_invalid_depth && bit_depth != 16);
#endif
if (is_invalid_depth)
png_error(png_ptr, "Invalid bit depth for RGB image");
png_ptr->channels = 3;
break;
case PNG_COLOR_TYPE_PALETTE:
switch (bit_depth)
{
case 1:
case 2:
case 4:
case 8:
png_ptr->channels = 1;
break;
default:
png_error(png_ptr, "Invalid bit depth for paletted image");
}
break;
case PNG_COLOR_TYPE_GRAY_ALPHA:
is_invalid_depth = (bit_depth != 8);
#ifdef PNG_WRITE_16BIT_SUPPORTED
is_invalid_depth = (is_invalid_depth && bit_depth != 16);
#endif
if (is_invalid_depth)
png_error(png_ptr, "Invalid bit depth for grayscale+alpha image");
png_ptr->channels = 2;
break;
case PNG_COLOR_TYPE_RGB_ALPHA:
is_invalid_depth = (bit_depth != 8);
#ifdef PNG_WRITE_16BIT_SUPPORTED
is_invalid_depth = (is_invalid_depth && bit_depth != 16);
#endif
if (is_invalid_depth)
png_error(png_ptr, "Invalid bit depth for RGBA image");
png_ptr->channels = 4;
break;
default:
png_error(png_ptr, "Invalid image color type specified");
}
if (compression_type != PNG_COMPRESSION_TYPE_BASE)
{
png_warning(png_ptr, "Invalid compression type specified");
compression_type = PNG_COMPRESSION_TYPE_BASE;
}
/* Write filter_method 64 (intrapixel differencing) only if
* 1. Libpng was compiled with PNG_MNG_FEATURES_SUPPORTED and
* 2. Libpng did not write a PNG signature (this filter_method is only
* used in PNG datastreams that are embedded in MNG datastreams) and
* 3. The application called png_permit_mng_features with a mask that
* included PNG_FLAG_MNG_FILTER_64 and
* 4. The filter_method is 64 and
* 5. The color_type is RGB or RGBA
*/
if (
#ifdef PNG_MNG_FEATURES_SUPPORTED
!((png_ptr->mng_features_permitted & PNG_FLAG_MNG_FILTER_64) != 0 &&
((png_ptr->mode & PNG_HAVE_PNG_SIGNATURE) == 0) &&
(color_type == PNG_COLOR_TYPE_RGB ||
color_type == PNG_COLOR_TYPE_RGB_ALPHA) &&
(filter_type == PNG_INTRAPIXEL_DIFFERENCING)) &&
#endif
filter_type != PNG_FILTER_TYPE_BASE)
{
png_warning(png_ptr, "Invalid filter type specified");
filter_type = PNG_FILTER_TYPE_BASE;
}
#ifdef PNG_WRITE_INTERLACING_SUPPORTED
if (interlace_type != PNG_INTERLACE_NONE &&
interlace_type != PNG_INTERLACE_ADAM7)
{
png_warning(png_ptr, "Invalid interlace type specified");
interlace_type = PNG_INTERLACE_ADAM7;
}
#else
interlace_type=PNG_INTERLACE_NONE;
#endif
/* Save the relevant information */
png_ptr->bit_depth = (png_byte)bit_depth;
png_ptr->color_type = (png_byte)color_type;
png_ptr->interlaced = (png_byte)interlace_type;
#ifdef PNG_MNG_FEATURES_SUPPORTED
png_ptr->filter_type = (png_byte)filter_type;
#endif
png_ptr->compression_type = (png_byte)compression_type;
png_ptr->width = width;
png_ptr->height = height;
png_ptr->pixel_depth = (png_byte)(bit_depth * png_ptr->channels);
png_ptr->rowbytes = PNG_ROWBYTES(png_ptr->pixel_depth, width);
/* Set the usr info, so any transformations can modify it */
png_ptr->usr_width = png_ptr->width;
png_ptr->usr_bit_depth = png_ptr->bit_depth;
png_ptr->usr_channels = png_ptr->channels;
/* Pack the header information into the buffer */
png_save_uint_32(buf, width);
png_save_uint_32(buf + 4, height);
buf[8] = (png_byte)bit_depth;
buf[9] = (png_byte)color_type;
buf[10] = (png_byte)compression_type;
buf[11] = (png_byte)filter_type;
buf[12] = (png_byte)interlace_type;
/* Write the chunk */
png_write_complete_chunk(png_ptr, png_IHDR, buf, (png_size_t)13);
if ((png_ptr->do_filter) == PNG_NO_FILTERS)
{
if (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE ||
png_ptr->bit_depth < 8)
png_ptr->do_filter = PNG_FILTER_NONE;
else
png_ptr->do_filter = PNG_ALL_FILTERS;
}
png_ptr->mode = PNG_HAVE_IHDR; /* not READY_FOR_ZTXT */
}
/* Write the palette. We are careful not to trust png_color to be in the
* correct order for PNG, so people can redefine it to any convenient
* structure.
*/
void /* PRIVATE */
png_write_PLTE(png_structrp png_ptr, png_const_colorp palette,
png_uint_32 num_pal)
{
png_uint_32 max_palette_length, i;
png_const_colorp pal_ptr;
png_byte buf[3];
png_debug(1, "in png_write_PLTE");
max_palette_length = (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE) ?
(1 << png_ptr->bit_depth) : PNG_MAX_PALETTE_LENGTH;
if ((
#ifdef PNG_MNG_FEATURES_SUPPORTED
(png_ptr->mng_features_permitted & PNG_FLAG_MNG_EMPTY_PLTE) == 0 &&
#endif
num_pal == 0) || num_pal > max_palette_length)
{
if (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE)
{
png_error(png_ptr, "Invalid number of colors in palette");
}
else
{
png_warning(png_ptr, "Invalid number of colors in palette");
return;
}
}
if ((png_ptr->color_type & PNG_COLOR_MASK_COLOR) == 0)
{
png_warning(png_ptr,
"Ignoring request to write a PLTE chunk in grayscale PNG");
return;
}
png_ptr->num_palette = (png_uint_16)num_pal;
png_debug1(3, "num_palette = %d", png_ptr->num_palette);
png_write_chunk_header(png_ptr, png_PLTE, (png_uint_32)(num_pal * 3));
#ifdef PNG_POINTER_INDEXING_SUPPORTED
for (i = 0, pal_ptr = palette; i < num_pal; i++, pal_ptr++)
{
buf[0] = pal_ptr->red;
buf[1] = pal_ptr->green;
buf[2] = pal_ptr->blue;
png_write_chunk_data(png_ptr, buf, (png_size_t)3);
}
#else
/* This is a little slower but some buggy compilers need to do this
* instead
*/
pal_ptr=palette;
for (i = 0; i < num_pal; i++)
{
buf[0] = pal_ptr[i].red;
buf[1] = pal_ptr[i].green;
buf[2] = pal_ptr[i].blue;
png_write_chunk_data(png_ptr, buf, (png_size_t)3);
}
#endif
png_write_chunk_end(png_ptr);
png_ptr->mode |= PNG_HAVE_PLTE;
}
/* This is similar to png_text_compress, above, except that it does not require
* all of the data at once and, instead of buffering the compressed result,
* writes it as IDAT chunks. Unlike png_text_compress it *can* png_error out
* because it calls the write interface. As a result it does its own error
* reporting and does not return an error code. In the event of error it will
* just call png_error. The input data length may exceed 32-bits. The 'flush'
* parameter is exactly the same as that to deflate, with the following
* meanings:
*
* Z_NO_FLUSH: normal incremental output of compressed data
* Z_SYNC_FLUSH: do a SYNC_FLUSH, used by png_write_flush
* Z_FINISH: this is the end of the input, do a Z_FINISH and clean up
*
* The routine manages the acquire and release of the png_ptr->zstream by
* checking and (at the end) clearing png_ptr->zowner; it does some sanity
* checks on the 'mode' flags while doing this.
*/
void /* PRIVATE */
png_compress_IDAT(png_structrp png_ptr, png_const_bytep input,
png_alloc_size_t input_len, int flush)
{
if (png_ptr->zowner != png_IDAT)
{
/* First time. Ensure we have a temporary buffer for compression and
* trim the buffer list if it has more than one entry to free memory.
* If 'WRITE_COMPRESSED_TEXT' is not set the list will never have been
* created at this point, but the check here is quick and safe.
*/
if (png_ptr->zbuffer_list == NULL)
{
png_ptr->zbuffer_list = png_voidcast(png_compression_bufferp,
png_malloc(png_ptr, PNG_COMPRESSION_BUFFER_SIZE(png_ptr)));
png_ptr->zbuffer_list->next = NULL;
}
else
png_free_buffer_list(png_ptr, &png_ptr->zbuffer_list->next);
/* It is a terminal error if we can't claim the zstream. */
if (png_deflate_claim(png_ptr, png_IDAT, png_image_size(png_ptr)) != Z_OK)
png_error(png_ptr, png_ptr->zstream.msg);
/* The output state is maintained in png_ptr->zstream, so it must be
* initialized here after the claim.
*/
png_ptr->zstream.next_out = png_ptr->zbuffer_list->output;
png_ptr->zstream.avail_out = png_ptr->zbuffer_size;
}
/* Now loop reading and writing until all the input is consumed or an error
* terminates the operation. The _out values are maintained across calls to
* this function, but the input must be reset each time.
*/
png_ptr->zstream.next_in = PNGZ_INPUT_CAST(input);
png_ptr->zstream.avail_in = 0; /* set below */
for (;;)
{
int ret;
/* INPUT: from the row data */
uInt avail = ZLIB_IO_MAX;
if (avail > input_len)
avail = (uInt)input_len; /* safe because of the check */
png_ptr->zstream.avail_in = avail;
input_len -= avail;
ret = deflate(&png_ptr->zstream, input_len > 0 ? Z_NO_FLUSH : flush);
/* Include as-yet unconsumed input */
input_len += png_ptr->zstream.avail_in;
png_ptr->zstream.avail_in = 0;
/* OUTPUT: write complete IDAT chunks when avail_out drops to zero. Note
* that these two zstream fields are preserved across the calls, therefore
* there is no need to set these up on entry to the loop.
*/
if (png_ptr->zstream.avail_out == 0)
{
png_bytep data = png_ptr->zbuffer_list->output;
uInt size = png_ptr->zbuffer_size;
/* Write an IDAT containing the data then reset the buffer. The
* first IDAT may need deflate header optimization.
*/
#ifdef PNG_WRITE_OPTIMIZE_CMF_SUPPORTED
if ((png_ptr->mode & PNG_HAVE_IDAT) == 0 &&
png_ptr->compression_type == PNG_COMPRESSION_TYPE_BASE)
optimize_cmf(data, png_image_size(png_ptr));
#endif
if (size > 0)
png_write_complete_chunk(png_ptr, png_IDAT, data, size);
png_ptr->mode |= PNG_HAVE_IDAT;
png_ptr->zstream.next_out = data;
png_ptr->zstream.avail_out = size;
/* For SYNC_FLUSH or FINISH it is essential to keep calling zlib with
* the same flush parameter until it has finished output, for NO_FLUSH
* it doesn't matter.
*/
if (ret == Z_OK && flush != Z_NO_FLUSH)
continue;
}
/* The order of these checks doesn't matter much; it just affects which
* possible error might be detected if multiple things go wrong at once.
*/
if (ret == Z_OK) /* most likely return code! */
{
/* If all the input has been consumed then just return. If Z_FINISH
* was used as the flush parameter something has gone wrong if we get
* here.
*/
if (input_len == 0)
{
if (flush == Z_FINISH)
png_error(png_ptr, "Z_OK on Z_FINISH with output space");
return;
}
}
else if (ret == Z_STREAM_END && flush == Z_FINISH)
{
/* This is the end of the IDAT data; any pending output must be
* flushed. For small PNG files we may still be at the beginning.
*/
png_bytep data = png_ptr->zbuffer_list->output;
uInt size = png_ptr->zbuffer_size - png_ptr->zstream.avail_out;
#ifdef PNG_WRITE_OPTIMIZE_CMF_SUPPORTED
if ((png_ptr->mode & PNG_HAVE_IDAT) == 0 &&
png_ptr->compression_type == PNG_COMPRESSION_TYPE_BASE)
optimize_cmf(data, png_image_size(png_ptr));
#endif
if (size > 0)
png_write_complete_chunk(png_ptr, png_IDAT, data, size);
png_ptr->zstream.avail_out = 0;
png_ptr->zstream.next_out = NULL;
png_ptr->mode |= PNG_HAVE_IDAT | PNG_AFTER_IDAT;
png_ptr->zowner = 0; /* Release the stream */
return;
}
else
{
/* This is an error condition. */
png_zstream_error(png_ptr, ret);
png_error(png_ptr, png_ptr->zstream.msg);
}
}
}
/* Write an IEND chunk */
void /* PRIVATE */
png_write_IEND(png_structrp png_ptr)
{
png_debug(1, "in png_write_IEND");
png_write_complete_chunk(png_ptr, png_IEND, NULL, (png_size_t)0);
png_ptr->mode |= PNG_HAVE_IEND;
}
#ifdef PNG_WRITE_gAMA_SUPPORTED
/* Write a gAMA chunk */
void /* PRIVATE */
png_write_gAMA_fixed(png_structrp png_ptr, png_fixed_point file_gamma)
{
png_byte buf[4];
png_debug(1, "in png_write_gAMA");
/* file_gamma is saved in 1/100,000ths */
png_save_uint_32(buf, (png_uint_32)file_gamma);
png_write_complete_chunk(png_ptr, png_gAMA, buf, (png_size_t)4);
}
#endif
#ifdef PNG_WRITE_sRGB_SUPPORTED
/* Write a sRGB chunk */
void /* PRIVATE */
png_write_sRGB(png_structrp png_ptr, int srgb_intent)
{
png_byte buf[1];
png_debug(1, "in png_write_sRGB");
if (srgb_intent >= PNG_sRGB_INTENT_LAST)
png_warning(png_ptr,
"Invalid sRGB rendering intent specified");
buf[0]=(png_byte)srgb_intent;
png_write_complete_chunk(png_ptr, png_sRGB, buf, (png_size_t)1);
}
#endif
#ifdef PNG_WRITE_iCCP_SUPPORTED
/* Write an iCCP chunk */
void /* PRIVATE */
png_write_iCCP(png_structrp png_ptr, png_const_charp name,
png_const_bytep profile)
{
png_uint_32 name_len;
png_uint_32 profile_len;
png_byte new_name[81]; /* 1 byte for the compression byte */
compression_state comp;
png_uint_32 temp;
png_debug(1, "in png_write_iCCP");
/* These are all internal problems: the profile should have been checked
* before when it was stored.
*/
if (profile == NULL)
png_error(png_ptr, "No profile for iCCP chunk"); /* internal error */
profile_len = png_get_uint_32(profile);
if (profile_len < 132)
png_error(png_ptr, "ICC profile too short");
temp = (png_uint_32) (*(profile+8));
if (temp > 3 && (profile_len & 0x03))
png_error(png_ptr, "ICC profile length invalid (not a multiple of 4)");
{
png_uint_32 embedded_profile_len = png_get_uint_32(profile);
if (profile_len != embedded_profile_len)
png_error(png_ptr, "Profile length does not match profile");
}
name_len = png_check_keyword(png_ptr, name, new_name);
if (name_len == 0)
png_error(png_ptr, "iCCP: invalid keyword");
new_name[++name_len] = PNG_COMPRESSION_TYPE_BASE;
/* Make sure we include the NULL after the name and the compression type */
++name_len;
png_text_compress_init(&comp, profile, profile_len);
/* Allow for keyword terminator and compression byte */
if (png_text_compress(png_ptr, png_iCCP, &comp, name_len) != Z_OK)
png_error(png_ptr, png_ptr->zstream.msg);
png_write_chunk_header(png_ptr, png_iCCP, name_len + comp.output_len);
png_write_chunk_data(png_ptr, new_name, name_len);
png_write_compressed_data_out(png_ptr, &comp);
png_write_chunk_end(png_ptr);
}
#endif
#ifdef PNG_WRITE_sPLT_SUPPORTED
/* Write a sPLT chunk */
void /* PRIVATE */
png_write_sPLT(png_structrp png_ptr, png_const_sPLT_tp spalette)
{
png_uint_32 name_len;
png_byte new_name[80];
png_byte entrybuf[10];
png_size_t entry_size = (spalette->depth == 8 ? 6 : 10);
png_size_t palette_size = entry_size * (png_size_t)spalette->nentries;
png_sPLT_entryp ep;
#ifndef PNG_POINTER_INDEXING_SUPPORTED
int i;
#endif
png_debug(1, "in png_write_sPLT");
name_len = png_check_keyword(png_ptr, spalette->name, new_name);
if (name_len == 0)
png_error(png_ptr, "sPLT: invalid keyword");
/* Make sure we include the NULL after the name */
png_write_chunk_header(png_ptr, png_sPLT,
(png_uint_32)(name_len + 2 + palette_size));
png_write_chunk_data(png_ptr, (png_bytep)new_name,
(png_size_t)(name_len + 1));
png_write_chunk_data(png_ptr, &spalette->depth, (png_size_t)1);
/* Loop through each palette entry, writing appropriately */
#ifdef PNG_POINTER_INDEXING_SUPPORTED
for (ep = spalette->entries; ep<spalette->entries + spalette->nentries; ep++)
{
if (spalette->depth == 8)
{
entrybuf[0] = (png_byte)ep->red;
entrybuf[1] = (png_byte)ep->green;
entrybuf[2] = (png_byte)ep->blue;
entrybuf[3] = (png_byte)ep->alpha;
png_save_uint_16(entrybuf + 4, ep->frequency);
}
else
{
png_save_uint_16(entrybuf + 0, ep->red);
png_save_uint_16(entrybuf + 2, ep->green);
png_save_uint_16(entrybuf + 4, ep->blue);
png_save_uint_16(entrybuf + 6, ep->alpha);
png_save_uint_16(entrybuf + 8, ep->frequency);
}
png_write_chunk_data(png_ptr, entrybuf, entry_size);
}
#else
ep=spalette->entries;
for (i = 0; i < spalette->nentries; i++)
{
if (spalette->depth == 8)
{
entrybuf[0] = (png_byte)ep[i].red;
entrybuf[1] = (png_byte)ep[i].green;
entrybuf[2] = (png_byte)ep[i].blue;
entrybuf[3] = (png_byte)ep[i].alpha;
png_save_uint_16(entrybuf + 4, ep[i].frequency);
}
else
{
png_save_uint_16(entrybuf + 0, ep[i].red);
png_save_uint_16(entrybuf + 2, ep[i].green);
png_save_uint_16(entrybuf + 4, ep[i].blue);
png_save_uint_16(entrybuf + 6, ep[i].alpha);
png_save_uint_16(entrybuf + 8, ep[i].frequency);
}
png_write_chunk_data(png_ptr, entrybuf, entry_size);
}
#endif
png_write_chunk_end(png_ptr);
}
#endif
#ifdef PNG_WRITE_sBIT_SUPPORTED
/* Write the sBIT chunk */
void /* PRIVATE */
png_write_sBIT(png_structrp png_ptr, png_const_color_8p sbit, int color_type)
{
png_byte buf[4];
png_size_t size;
png_debug(1, "in png_write_sBIT");
/* Make sure we don't depend upon the order of PNG_COLOR_8 */
if ((color_type & PNG_COLOR_MASK_COLOR) != 0)
{
png_byte maxbits;
maxbits = (png_byte)(color_type==PNG_COLOR_TYPE_PALETTE ? 8 :
png_ptr->usr_bit_depth);
if (sbit->red == 0 || sbit->red > maxbits ||
sbit->green == 0 || sbit->green > maxbits ||
sbit->blue == 0 || sbit->blue > maxbits)
{
png_warning(png_ptr, "Invalid sBIT depth specified");
return;
}
buf[0] = sbit->red;
buf[1] = sbit->green;
buf[2] = sbit->blue;
size = 3;
}
else
{
if (sbit->gray == 0 || sbit->gray > png_ptr->usr_bit_depth)
{
png_warning(png_ptr, "Invalid sBIT depth specified");
return;
}
buf[0] = sbit->gray;
size = 1;
}
if ((color_type & PNG_COLOR_MASK_ALPHA) != 0)
{
if (sbit->alpha == 0 || sbit->alpha > png_ptr->usr_bit_depth)
{
png_warning(png_ptr, "Invalid sBIT depth specified");
return;
}
buf[size++] = sbit->alpha;
}
png_write_complete_chunk(png_ptr, png_sBIT, buf, size);
}
#endif
#ifdef PNG_WRITE_cHRM_SUPPORTED
/* Write the cHRM chunk */
void /* PRIVATE */
png_write_cHRM_fixed(png_structrp png_ptr, const png_xy *xy)
{
png_byte buf[32];
png_debug(1, "in png_write_cHRM");
/* Each value is saved in 1/100,000ths */
png_save_int_32(buf, xy->whitex);
png_save_int_32(buf + 4, xy->whitey);
png_save_int_32(buf + 8, xy->redx);
png_save_int_32(buf + 12, xy->redy);
png_save_int_32(buf + 16, xy->greenx);
png_save_int_32(buf + 20, xy->greeny);
png_save_int_32(buf + 24, xy->bluex);
png_save_int_32(buf + 28, xy->bluey);
png_write_complete_chunk(png_ptr, png_cHRM, buf, 32);
}
#endif
#ifdef PNG_WRITE_tRNS_SUPPORTED
/* Write the tRNS chunk */
void /* PRIVATE */
png_write_tRNS(png_structrp png_ptr, png_const_bytep trans_alpha,
png_const_color_16p tran, int num_trans, int color_type)
{
png_byte buf[6];
png_debug(1, "in png_write_tRNS");
if (color_type == PNG_COLOR_TYPE_PALETTE)
{
if (num_trans <= 0 || num_trans > (int)png_ptr->num_palette)
{
png_app_warning(png_ptr,
"Invalid number of transparent colors specified");
return;
}
/* Write the chunk out as it is */
png_write_complete_chunk(png_ptr, png_tRNS, trans_alpha,
(png_size_t)num_trans);
}
else if (color_type == PNG_COLOR_TYPE_GRAY)
{
/* One 16-bit value */
if (tran->gray >= (1 << png_ptr->bit_depth))
{
png_app_warning(png_ptr,
"Ignoring attempt to write tRNS chunk out-of-range for bit_depth");
return;
}
png_save_uint_16(buf, tran->gray);
png_write_complete_chunk(png_ptr, png_tRNS, buf, (png_size_t)2);
}
else if (color_type == PNG_COLOR_TYPE_RGB)
{
/* Three 16-bit values */
png_save_uint_16(buf, tran->red);
png_save_uint_16(buf + 2, tran->green);
png_save_uint_16(buf + 4, tran->blue);
#ifdef PNG_WRITE_16BIT_SUPPORTED
if (png_ptr->bit_depth == 8 && (buf[0] | buf[2] | buf[4]) != 0)
#else
if ((buf[0] | buf[2] | buf[4]) != 0)
#endif
{
png_app_warning(png_ptr,
"Ignoring attempt to write 16-bit tRNS chunk when bit_depth is 8");
return;
}
png_write_complete_chunk(png_ptr, png_tRNS, buf, (png_size_t)6);
}
else
{
png_app_warning(png_ptr, "Can't write tRNS with an alpha channel");
}
}
#endif
#ifdef PNG_WRITE_bKGD_SUPPORTED
/* Write the background chunk */
void /* PRIVATE */
png_write_bKGD(png_structrp png_ptr, png_const_color_16p back, int color_type)
{
png_byte buf[6];
png_debug(1, "in png_write_bKGD");
if (color_type == PNG_COLOR_TYPE_PALETTE)
{
if (
#ifdef PNG_MNG_FEATURES_SUPPORTED
(png_ptr->num_palette != 0 ||
(png_ptr->mng_features_permitted & PNG_FLAG_MNG_EMPTY_PLTE) == 0) &&
#endif
back->index >= png_ptr->num_palette)
{
png_warning(png_ptr, "Invalid background palette index");
return;
}
buf[0] = back->index;
png_write_complete_chunk(png_ptr, png_bKGD, buf, (png_size_t)1);
}
else if ((color_type & PNG_COLOR_MASK_COLOR) != 0)
{
png_save_uint_16(buf, back->red);
png_save_uint_16(buf + 2, back->green);
png_save_uint_16(buf + 4, back->blue);
#ifdef PNG_WRITE_16BIT_SUPPORTED
if (png_ptr->bit_depth == 8 && (buf[0] | buf[2] | buf[4]) != 0)
#else
if ((buf[0] | buf[2] | buf[4]) != 0)
#endif
{
png_warning(png_ptr,
"Ignoring attempt to write 16-bit bKGD chunk "
"when bit_depth is 8");
return;
}
png_write_complete_chunk(png_ptr, png_bKGD, buf, (png_size_t)6);
}
else
{
if (back->gray >= (1 << png_ptr->bit_depth))
{
png_warning(png_ptr,
"Ignoring attempt to write bKGD chunk out-of-range for bit_depth");
return;
}
png_save_uint_16(buf, back->gray);
png_write_complete_chunk(png_ptr, png_bKGD, buf, (png_size_t)2);
}
}
#endif
#ifdef PNG_WRITE_eXIf_SUPPORTED
/* Write the Exif data */
void /* PRIVATE */
png_write_eXIf(png_structrp png_ptr, png_bytep exif, int num_exif)
{
int i;
png_byte buf[1];
png_debug(1, "in png_write_eXIf");
png_write_chunk_header(png_ptr, png_eXIf, (png_uint_32)(num_exif));
for (i = 0; i < num_exif; i++)
{
buf[0] = exif[i];
png_write_chunk_data(png_ptr, buf, (png_size_t)1);
}
png_write_chunk_end(png_ptr);
}
#endif
#ifdef PNG_WRITE_hIST_SUPPORTED
/* Write the histogram */
void /* PRIVATE */
png_write_hIST(png_structrp png_ptr, png_const_uint_16p hist, int num_hist)
{
int i;
png_byte buf[3];
png_debug(1, "in png_write_hIST");
if (num_hist > (int)png_ptr->num_palette)
{
png_debug2(3, "num_hist = %d, num_palette = %d", num_hist,
png_ptr->num_palette);
png_warning(png_ptr, "Invalid number of histogram entries specified");
return;
}
png_write_chunk_header(png_ptr, png_hIST, (png_uint_32)(num_hist * 2));
for (i = 0; i < num_hist; i++)
{
png_save_uint_16(buf, hist[i]);
png_write_chunk_data(png_ptr, buf, (png_size_t)2);
}
png_write_chunk_end(png_ptr);
}
#endif
#ifdef PNG_WRITE_tEXt_SUPPORTED
/* Write a tEXt chunk */
void /* PRIVATE */
png_write_tEXt(png_structrp png_ptr, png_const_charp key, png_const_charp text,
png_size_t text_len)
{
png_uint_32 key_len;
png_byte new_key[80];
png_debug(1, "in png_write_tEXt");
key_len = png_check_keyword(png_ptr, key, new_key);
if (key_len == 0)
png_error(png_ptr, "tEXt: invalid keyword");
if (text == NULL || *text == '\0')
text_len = 0;
else
text_len = strlen(text);
if (text_len > PNG_UINT_31_MAX - (key_len+1))
png_error(png_ptr, "tEXt: text too long");
/* Make sure we include the 0 after the key */
png_write_chunk_header(png_ptr, png_tEXt,
(png_uint_32)/*checked above*/(key_len + text_len + 1));
/*
* We leave it to the application to meet PNG-1.0 requirements on the
* contents of the text. PNG-1.0 through PNG-1.2 discourage the use of
* any non-Latin-1 characters except for NEWLINE. ISO PNG will forbid them.
* The NUL character is forbidden by PNG-1.0 through PNG-1.2 and ISO PNG.
*/
png_write_chunk_data(png_ptr, new_key, key_len + 1);
if (text_len != 0)
png_write_chunk_data(png_ptr, (png_const_bytep)text, text_len);
png_write_chunk_end(png_ptr);
}
#endif
#ifdef PNG_WRITE_zTXt_SUPPORTED
/* Write a compressed text chunk */
void /* PRIVATE */
png_write_zTXt(png_structrp png_ptr, png_const_charp key, png_const_charp text,
int compression)
{
png_uint_32 key_len;
png_byte new_key[81];
compression_state comp;
png_debug(1, "in png_write_zTXt");
if (compression == PNG_TEXT_COMPRESSION_NONE)
{
png_write_tEXt(png_ptr, key, text, 0);
return;
}
if (compression != PNG_TEXT_COMPRESSION_zTXt)
png_error(png_ptr, "zTXt: invalid compression type");
key_len = png_check_keyword(png_ptr, key, new_key);
if (key_len == 0)
png_error(png_ptr, "zTXt: invalid keyword");
/* Add the compression method and 1 for the keyword separator. */
new_key[++key_len] = PNG_COMPRESSION_TYPE_BASE;
++key_len;
/* Compute the compressed data; do it now for the length */
png_text_compress_init(&comp, (png_const_bytep)text,
text == NULL ? 0 : strlen(text));
if (png_text_compress(png_ptr, png_zTXt, &comp, key_len) != Z_OK)
png_error(png_ptr, png_ptr->zstream.msg);
/* Write start of chunk */
png_write_chunk_header(png_ptr, png_zTXt, key_len + comp.output_len);
/* Write key */
png_write_chunk_data(png_ptr, new_key, key_len);
/* Write the compressed data */
png_write_compressed_data_out(png_ptr, &comp);
/* Close the chunk */
png_write_chunk_end(png_ptr);
}
#endif
#ifdef PNG_WRITE_iTXt_SUPPORTED
/* Write an iTXt chunk */
void /* PRIVATE */
png_write_iTXt(png_structrp png_ptr, int compression, png_const_charp key,
png_const_charp lang, png_const_charp lang_key, png_const_charp text)
{
png_uint_32 key_len, prefix_len;
png_size_t lang_len, lang_key_len;
png_byte new_key[82];
compression_state comp;
png_debug(1, "in png_write_iTXt");
key_len = png_check_keyword(png_ptr, key, new_key);
if (key_len == 0)
png_error(png_ptr, "iTXt: invalid keyword");
/* Set the compression flag */
switch (compression)
{
case PNG_ITXT_COMPRESSION_NONE:
case PNG_TEXT_COMPRESSION_NONE:
compression = new_key[++key_len] = 0; /* no compression */
break;
case PNG_TEXT_COMPRESSION_zTXt:
case PNG_ITXT_COMPRESSION_zTXt:
compression = new_key[++key_len] = 1; /* compressed */
break;
default:
png_error(png_ptr, "iTXt: invalid compression");
}
new_key[++key_len] = PNG_COMPRESSION_TYPE_BASE;
++key_len; /* for the keyword separator */
/* We leave it to the application to meet PNG-1.0 requirements on the
* contents of the text. PNG-1.0 through PNG-1.2 discourage the use of
* any non-Latin-1 characters except for NEWLINE. ISO PNG, however,
* specifies that the text is UTF-8 and this really doesn't require any
* checking.
*
* The NUL character is forbidden by PNG-1.0 through PNG-1.2 and ISO PNG.
*
* TODO: validate the language tag correctly (see the spec.)
*/
if (lang == NULL) lang = ""; /* empty language is valid */
lang_len = strlen(lang)+1;
if (lang_key == NULL) lang_key = ""; /* may be empty */
lang_key_len = strlen(lang_key)+1;
if (text == NULL) text = ""; /* may be empty */
prefix_len = key_len;
if (lang_len > PNG_UINT_31_MAX-prefix_len)
prefix_len = PNG_UINT_31_MAX;
else
prefix_len = (png_uint_32)(prefix_len + lang_len);
if (lang_key_len > PNG_UINT_31_MAX-prefix_len)
prefix_len = PNG_UINT_31_MAX;
else
prefix_len = (png_uint_32)(prefix_len + lang_key_len);
png_text_compress_init(&comp, (png_const_bytep)text, strlen(text));
if (compression != 0)
{
if (png_text_compress(png_ptr, png_iTXt, &comp, prefix_len) != Z_OK)
png_error(png_ptr, png_ptr->zstream.msg);
}
else
{
if (comp.input_len > PNG_UINT_31_MAX-prefix_len)
png_error(png_ptr, "iTXt: uncompressed text too long");
/* So the string will fit in a chunk: */
comp.output_len = (png_uint_32)/*SAFE*/comp.input_len;
}
png_write_chunk_header(png_ptr, png_iTXt, comp.output_len + prefix_len);
png_write_chunk_data(png_ptr, new_key, key_len);
png_write_chunk_data(png_ptr, (png_const_bytep)lang, lang_len);
png_write_chunk_data(png_ptr, (png_const_bytep)lang_key, lang_key_len);
if (compression != 0)
png_write_compressed_data_out(png_ptr, &comp);
else
png_write_chunk_data(png_ptr, (png_const_bytep)text, comp.output_len);
png_write_chunk_end(png_ptr);
}
#endif
#ifdef PNG_WRITE_oFFs_SUPPORTED
/* Write the oFFs chunk */
void /* PRIVATE */
png_write_oFFs(png_structrp png_ptr, png_int_32 x_offset, png_int_32 y_offset,
int unit_type)
{
png_byte buf[9];
png_debug(1, "in png_write_oFFs");
if (unit_type >= PNG_OFFSET_LAST)
png_warning(png_ptr, "Unrecognized unit type for oFFs chunk");
png_save_int_32(buf, x_offset);
png_save_int_32(buf + 4, y_offset);
buf[8] = (png_byte)unit_type;
png_write_complete_chunk(png_ptr, png_oFFs, buf, (png_size_t)9);
}
#endif
#ifdef PNG_WRITE_pCAL_SUPPORTED
/* Write the pCAL chunk (described in the PNG extensions document) */
void /* PRIVATE */
png_write_pCAL(png_structrp png_ptr, png_charp purpose, png_int_32 X0,
png_int_32 X1, int type, int nparams, png_const_charp units,
png_charpp params)
{
png_uint_32 purpose_len;
png_size_t units_len, total_len;
png_size_tp params_len;
png_byte buf[10];
png_byte new_purpose[80];
int i;
png_debug1(1, "in png_write_pCAL (%d parameters)", nparams);
if (type >= PNG_EQUATION_LAST)
png_error(png_ptr, "Unrecognized equation type for pCAL chunk");
purpose_len = png_check_keyword(png_ptr, purpose, new_purpose);
if (purpose_len == 0)
png_error(png_ptr, "pCAL: invalid keyword");
++purpose_len; /* terminator */
png_debug1(3, "pCAL purpose length = %d", (int)purpose_len);
units_len = strlen(units) + (nparams == 0 ? 0 : 1);
png_debug1(3, "pCAL units length = %d", (int)units_len);
total_len = purpose_len + units_len + 10;
params_len = (png_size_tp)png_malloc(png_ptr,
(png_alloc_size_t)((png_alloc_size_t)nparams * (sizeof (png_size_t))));
/* Find the length of each parameter, making sure we don't count the
* null terminator for the last parameter.
*/
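   /* For example, with nparams == 2 the first parameter's length counts its
    * separating NUL but the last one's does not, so the final parameter is
    * written without a trailing NUL.
    */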
for (i = 0; i < nparams; i++)
{
params_len[i] = strlen(params[i]) + (i == nparams - 1 ? 0 : 1);
png_debug2(3, "pCAL parameter %d length = %lu", i,
(unsigned long)params_len[i]);
total_len += params_len[i];
}
png_debug1(3, "pCAL total length = %d", (int)total_len);
png_write_chunk_header(png_ptr, png_pCAL, (png_uint_32)total_len);
png_write_chunk_data(png_ptr, new_purpose, purpose_len);
png_save_int_32(buf, X0);
png_save_int_32(buf + 4, X1);
buf[8] = (png_byte)type;
buf[9] = (png_byte)nparams;
png_write_chunk_data(png_ptr, buf, (png_size_t)10);
png_write_chunk_data(png_ptr, (png_const_bytep)units, (png_size_t)units_len);
for (i = 0; i < nparams; i++)
{
png_write_chunk_data(png_ptr, (png_const_bytep)params[i], params_len[i]);
}
png_free(png_ptr, params_len);
png_write_chunk_end(png_ptr);
}
#endif
#ifdef PNG_WRITE_sCAL_SUPPORTED
/* Write the sCAL chunk */
void /* PRIVATE */
png_write_sCAL_s(png_structrp png_ptr, int unit, png_const_charp width,
png_const_charp height)
{
png_byte buf[64];
png_size_t wlen, hlen, total_len;
png_debug(1, "in png_write_sCAL_s");
wlen = strlen(width);
hlen = strlen(height);
total_len = wlen + hlen + 2;
if (total_len > 64)
{
png_warning(png_ptr, "Can't write sCAL (buffer too small)");
return;
}
buf[0] = (png_byte)unit;
memcpy(buf + 1, width, wlen + 1); /* Append the '\0' here */
memcpy(buf + wlen + 2, height, hlen); /* Do NOT append the '\0' here */
png_debug1(3, "sCAL total length = %u", (unsigned int)total_len);
png_write_complete_chunk(png_ptr, png_sCAL, buf, total_len);
}
#endif
#ifdef PNG_WRITE_pHYs_SUPPORTED
/* Write the pHYs chunk */
void /* PRIVATE */
png_write_pHYs(png_structrp png_ptr, png_uint_32 x_pixels_per_unit,
png_uint_32 y_pixels_per_unit,
int unit_type)
{
png_byte buf[9];
png_debug(1, "in png_write_pHYs");
if (unit_type >= PNG_RESOLUTION_LAST)
png_warning(png_ptr, "Unrecognized unit type for pHYs chunk");
png_save_uint_32(buf, x_pixels_per_unit);
png_save_uint_32(buf + 4, y_pixels_per_unit);
buf[8] = (png_byte)unit_type;
png_write_complete_chunk(png_ptr, png_pHYs, buf, (png_size_t)9);
}
#endif
#ifdef PNG_WRITE_tIME_SUPPORTED
/* Write the tIME chunk. Use either png_convert_from_struct_tm()
* or png_convert_from_time_t(), or fill in the structure yourself.
*/
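/* A minimal usage sketch from the application side (assuming the usual
 * libpng info-struct flow; the names below are the public png.h APIs):
 *
 *    png_time mod_time;
 *    png_convert_from_time_t(&mod_time, time(NULL));
 *    png_set_tIME(png_ptr, info_ptr, &mod_time);
 *
 * The write code then ends up here to emit the 7-byte tIME payload.
 */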
void /* PRIVATE */
png_write_tIME(png_structrp png_ptr, png_const_timep mod_time)
{
png_byte buf[7];
png_debug(1, "in png_write_tIME");
if (mod_time->month > 12 || mod_time->month < 1 ||
mod_time->day > 31 || mod_time->day < 1 ||
mod_time->hour > 23 || mod_time->second > 60)
{
png_warning(png_ptr, "Invalid time specified for tIME chunk");
return;
}
png_save_uint_16(buf, mod_time->year);
buf[2] = mod_time->month;
buf[3] = mod_time->day;
buf[4] = mod_time->hour;
buf[5] = mod_time->minute;
buf[6] = mod_time->second;
png_write_complete_chunk(png_ptr, png_tIME, buf, (png_size_t)7);
}
#endif
/* Initializes the row writing capability of libpng */
void /* PRIVATE */
png_write_start_row(png_structrp png_ptr)
{
#ifdef PNG_WRITE_INTERLACING_SUPPORTED
/* Arrays to facilitate easy interlacing - use pass (0 - 6) as index */
/* Start of interlace block */
static PNG_CONST png_byte png_pass_start[7] = {0, 4, 0, 2, 0, 1, 0};
/* Offset to next interlace block */
static PNG_CONST png_byte png_pass_inc[7] = {8, 8, 4, 4, 2, 2, 1};
/* Start of interlace block in the y direction */
static PNG_CONST png_byte png_pass_ystart[7] = {0, 0, 4, 0, 2, 0, 1};
/* Offset to next interlace block in the y direction */
static PNG_CONST png_byte png_pass_yinc[7] = {8, 8, 8, 4, 4, 2, 2};
#endif
png_alloc_size_t buf_size;
int usr_pixel_depth;
#ifdef PNG_WRITE_FILTER_SUPPORTED
png_byte filters;
#endif
png_debug(1, "in png_write_start_row");
usr_pixel_depth = png_ptr->usr_channels * png_ptr->usr_bit_depth;
buf_size = PNG_ROWBYTES(usr_pixel_depth, png_ptr->width) + 1;
/* 1.5.6: added to allow checking in the row write code. */
png_ptr->transformed_pixel_depth = png_ptr->pixel_depth;
png_ptr->maximum_pixel_depth = (png_byte)usr_pixel_depth;
/* Set up row buffer */
png_ptr->row_buf = png_voidcast(png_bytep, png_malloc(png_ptr, buf_size));
png_ptr->row_buf[0] = PNG_FILTER_VALUE_NONE;
#ifdef PNG_WRITE_FILTER_SUPPORTED
filters = png_ptr->do_filter;
if (png_ptr->height == 1)
filters &= 0xff & ~(PNG_FILTER_UP|PNG_FILTER_AVG|PNG_FILTER_PAETH);
if (png_ptr->width == 1)
filters &= 0xff & ~(PNG_FILTER_SUB|PNG_FILTER_AVG|PNG_FILTER_PAETH);
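   /* (With a single row there is no previous row, and with a single pixel
    * there is no pixel to the left, so those filters have little or nothing
    * useful to predict from.)
    */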
if (filters == 0)
filters = PNG_FILTER_NONE;
png_ptr->do_filter = filters;
if (((filters & (PNG_FILTER_SUB | PNG_FILTER_UP | PNG_FILTER_AVG |
PNG_FILTER_PAETH)) != 0) && png_ptr->try_row == NULL)
{
int num_filters = 0;
png_ptr->try_row = png_voidcast(png_bytep, png_malloc(png_ptr, buf_size));
if (filters & PNG_FILTER_SUB)
num_filters++;
if (filters & PNG_FILTER_UP)
num_filters++;
if (filters & PNG_FILTER_AVG)
num_filters++;
if (filters & PNG_FILTER_PAETH)
num_filters++;
if (num_filters > 1)
png_ptr->tst_row = png_voidcast(png_bytep, png_malloc(png_ptr,
buf_size));
}
/* We only need to keep the previous row if we are using one of the following
* filters.
*/
if ((filters & (PNG_FILTER_AVG | PNG_FILTER_UP | PNG_FILTER_PAETH)) != 0)
png_ptr->prev_row = png_voidcast(png_bytep,
png_calloc(png_ptr, buf_size));
#endif /* WRITE_FILTER */
#ifdef PNG_WRITE_INTERLACING_SUPPORTED
/* If interlaced, we need to set up width and height of pass */
if (png_ptr->interlaced != 0)
{
if ((png_ptr->transformations & PNG_INTERLACE) == 0)
{
png_ptr->num_rows = (png_ptr->height + png_pass_yinc[0] - 1 -
png_pass_ystart[0]) / png_pass_yinc[0];
png_ptr->usr_width = (png_ptr->width + png_pass_inc[0] - 1 -
png_pass_start[0]) / png_pass_inc[0];
}
else
{
png_ptr->num_rows = png_ptr->height;
png_ptr->usr_width = png_ptr->width;
}
}
else
#endif
{
png_ptr->num_rows = png_ptr->height;
png_ptr->usr_width = png_ptr->width;
}
}
/* Internal use only. Called when finished processing a row of data. */
void /* PRIVATE */
png_write_finish_row(png_structrp png_ptr)
{
#ifdef PNG_WRITE_INTERLACING_SUPPORTED
/* Arrays to facilitate easy interlacing - use pass (0 - 6) as index */
/* Start of interlace block */
static PNG_CONST png_byte png_pass_start[7] = {0, 4, 0, 2, 0, 1, 0};
/* Offset to next interlace block */
static PNG_CONST png_byte png_pass_inc[7] = {8, 8, 4, 4, 2, 2, 1};
/* Start of interlace block in the y direction */
static PNG_CONST png_byte png_pass_ystart[7] = {0, 0, 4, 0, 2, 0, 1};
/* Offset to next interlace block in the y direction */
static PNG_CONST png_byte png_pass_yinc[7] = {8, 8, 8, 4, 4, 2, 2};
#endif
png_debug(1, "in png_write_finish_row");
/* Next row */
png_ptr->row_number++;
/* See if we are done */
if (png_ptr->row_number < png_ptr->num_rows)
return;
#ifdef PNG_WRITE_INTERLACING_SUPPORTED
/* If interlaced, go to next pass */
if (png_ptr->interlaced != 0)
{
png_ptr->row_number = 0;
if ((png_ptr->transformations & PNG_INTERLACE) != 0)
{
png_ptr->pass++;
}
else
{
/* Loop until we find a non-zero width or height pass */
do
{
png_ptr->pass++;
if (png_ptr->pass >= 7)
break;
png_ptr->usr_width = (png_ptr->width +
png_pass_inc[png_ptr->pass] - 1 -
png_pass_start[png_ptr->pass]) /
png_pass_inc[png_ptr->pass];
png_ptr->num_rows = (png_ptr->height +
png_pass_yinc[png_ptr->pass] - 1 -
png_pass_ystart[png_ptr->pass]) /
png_pass_yinc[png_ptr->pass];
if ((png_ptr->transformations & PNG_INTERLACE) != 0)
break;
} while (png_ptr->usr_width == 0 || png_ptr->num_rows == 0);
}
/* Reset the row above the image for the next pass */
if (png_ptr->pass < 7)
{
if (png_ptr->prev_row != NULL)
memset(png_ptr->prev_row, 0,
(png_size_t)(PNG_ROWBYTES(png_ptr->usr_channels*
png_ptr->usr_bit_depth, png_ptr->width)) + 1);
return;
}
}
#endif
/* If we get here, we've just written the last row, so we need
to flush the compressor */
png_compress_IDAT(png_ptr, NULL, 0, Z_FINISH);
}
#ifdef PNG_WRITE_INTERLACING_SUPPORTED
/* Pick out the correct pixels for the interlace pass.
* The basic idea here is to go through the row with a source
* pointer and a destination pointer (sp and dp), and copy the
* correct pixels for the pass. As the row gets compacted,
* sp will always be >= dp, so we should never overwrite anything.
* See the default: case for the easiest code to understand.
*/
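/* For illustration (derived from the Adam7 tables used below): pass 3 has
 * png_pass_start[3] == 2 and png_pass_inc[3] == 4, so it keeps the pixels
 * at x = 2, 6, 10, ... of each contributing row; a 16-pixel row is thereby
 * compacted to (16 + 4 - 1 - 2) / 4 == 4 pixels for that pass.
 */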
void /* PRIVATE */
png_do_write_interlace(png_row_infop row_info, png_bytep row, int pass)
{
/* Arrays to facilitate easy interlacing - use pass (0 - 6) as index */
/* Start of interlace block */
static PNG_CONST png_byte png_pass_start[7] = {0, 4, 0, 2, 0, 1, 0};
/* Offset to next interlace block */
static PNG_CONST png_byte png_pass_inc[7] = {8, 8, 4, 4, 2, 2, 1};
png_debug(1, "in png_do_write_interlace");
/* We don't have to do anything on the last pass (6) */
if (pass < 6)
{
/* Each pixel depth is handled separately */
switch (row_info->pixel_depth)
{
case 1:
{
png_bytep sp;
png_bytep dp;
unsigned int shift;
int d;
int value;
png_uint_32 i;
png_uint_32 row_width = row_info->width;
dp = row;
d = 0;
shift = 7;
for (i = png_pass_start[pass]; i < row_width;
i += png_pass_inc[pass])
{
sp = row + (png_size_t)(i >> 3);
value = (int)(*sp >> (7 - (int)(i & 0x07))) & 0x01;
d |= (value << shift);
if (shift == 0)
{
shift = 7;
*dp++ = (png_byte)d;
d = 0;
}
else
shift--;
}
if (shift != 7)
*dp = (png_byte)d;
break;
}
case 2:
{
png_bytep sp;
png_bytep dp;
unsigned int shift;
int d;
int value;
png_uint_32 i;
png_uint_32 row_width = row_info->width;
dp = row;
shift = 6;
d = 0;
for (i = png_pass_start[pass]; i < row_width;
i += png_pass_inc[pass])
{
sp = row + (png_size_t)(i >> 2);
value = (*sp >> ((3 - (int)(i & 0x03)) << 1)) & 0x03;
d |= (value << shift);
if (shift == 0)
{
shift = 6;
*dp++ = (png_byte)d;
d = 0;
}
else
shift -= 2;
}
if (shift != 6)
*dp = (png_byte)d;
break;
}
case 4:
{
png_bytep sp;
png_bytep dp;
unsigned int shift;
int d;
int value;
png_uint_32 i;
png_uint_32 row_width = row_info->width;
dp = row;
shift = 4;
d = 0;
for (i = png_pass_start[pass]; i < row_width;
i += png_pass_inc[pass])
{
sp = row + (png_size_t)(i >> 1);
value = (*sp >> ((1 - (int)(i & 0x01)) << 2)) & 0x0f;
d |= (value << shift);
if (shift == 0)
{
shift = 4;
*dp++ = (png_byte)d;
d = 0;
}
else
shift -= 4;
}
if (shift != 4)
*dp = (png_byte)d;
break;
}
default:
{
png_bytep sp;
png_bytep dp;
png_uint_32 i;
png_uint_32 row_width = row_info->width;
png_size_t pixel_bytes;
/* Start at the beginning */
dp = row;
/* Find out how many bytes each pixel takes up */
pixel_bytes = (row_info->pixel_depth >> 3);
/* Loop through the row, only looking at the pixels that matter */
for (i = png_pass_start[pass]; i < row_width;
i += png_pass_inc[pass])
{
/* Find out where the original pixel is */
sp = row + (png_size_t)i * pixel_bytes;
/* Move the pixel */
if (dp != sp)
memcpy(dp, sp, pixel_bytes);
/* Next pixel */
dp += pixel_bytes;
}
break;
}
}
/* Set new row width */
row_info->width = (row_info->width +
png_pass_inc[pass] - 1 -
png_pass_start[pass]) /
png_pass_inc[pass];
row_info->rowbytes = PNG_ROWBYTES(row_info->pixel_depth,
row_info->width);
}
}
#endif
/* This filters the row, chooses which filter to use, if it has not already
* been specified by the application, and then writes the row out with the
* chosen filter.
*/
static void /* PRIVATE */
png_write_filtered_row(png_structrp png_ptr, png_bytep filtered_row,
png_size_t row_bytes);
#ifdef PNG_WRITE_FILTER_SUPPORTED
static png_size_t /* PRIVATE */
png_setup_sub_row(png_structrp png_ptr, const png_uint_32 bpp,
const png_size_t row_bytes, const png_size_t lmins)
{
png_bytep rp, dp, lp;
png_size_t i;
png_size_t sum = 0;
unsigned int v;
png_ptr->try_row[0] = PNG_FILTER_VALUE_SUB;
for (i = 0, rp = png_ptr->row_buf + 1, dp = png_ptr->try_row + 1; i < bpp;
i++, rp++, dp++)
{
v = *dp = *rp;
#ifdef PNG_USE_ABS
sum += 128 - abs((int)v - 128);
#else
sum += (v < 128) ? v : 256 - v;
#endif
}
for (lp = png_ptr->row_buf + 1; i < row_bytes;
i++, rp++, lp++, dp++)
{
v = *dp = (png_byte)(((int)*rp - (int)*lp) & 0xff);
#ifdef PNG_USE_ABS
sum += 128 - abs((int)v - 128);
#else
sum += (v < 128) ? v : 256 - v;
#endif
if (sum > lmins) /* We are already worse, don't continue. */
break;
}
return (sum);
}
static void /* PRIVATE */
png_setup_sub_row_only(png_structrp png_ptr, const png_uint_32 bpp,
const png_size_t row_bytes)
{
png_bytep rp, dp, lp;
png_size_t i;
png_ptr->try_row[0] = PNG_FILTER_VALUE_SUB;
for (i = 0, rp = png_ptr->row_buf + 1, dp = png_ptr->try_row + 1; i < bpp;
i++, rp++, dp++)
{
*dp = *rp;
}
for (lp = png_ptr->row_buf + 1; i < row_bytes;
i++, rp++, lp++, dp++)
{
*dp = (png_byte)(((int)*rp - (int)*lp) & 0xff);
}
}
static png_size_t /* PRIVATE */
png_setup_up_row(png_structrp png_ptr, const png_size_t row_bytes,
const png_size_t lmins)
{
png_bytep rp, dp, pp;
png_size_t i;
png_size_t sum = 0;
unsigned int v;
png_ptr->try_row[0] = PNG_FILTER_VALUE_UP;
for (i = 0, rp = png_ptr->row_buf + 1, dp = png_ptr->try_row + 1,
pp = png_ptr->prev_row + 1; i < row_bytes;
i++, rp++, pp++, dp++)
{
v = *dp = (png_byte)(((int)*rp - (int)*pp) & 0xff);
#ifdef PNG_USE_ABS
sum += 128 - abs((int)v - 128);
#else
sum += (v < 128) ? v : 256 - v;
#endif
if (sum > lmins) /* We are already worse, don't continue. */
break;
}
return (sum);
}
static void /* PRIVATE */
png_setup_up_row_only(png_structrp png_ptr, const png_size_t row_bytes)
{
png_bytep rp, dp, pp;
png_size_t i;
png_ptr->try_row[0] = PNG_FILTER_VALUE_UP;
for (i = 0, rp = png_ptr->row_buf + 1, dp = png_ptr->try_row + 1,
pp = png_ptr->prev_row + 1; i < row_bytes;
i++, rp++, pp++, dp++)
{
*dp = (png_byte)(((int)*rp - (int)*pp) & 0xff);
}
}
static png_size_t /* PRIVATE */
png_setup_avg_row(png_structrp png_ptr, const png_uint_32 bpp,
const png_size_t row_bytes, const png_size_t lmins)
{
png_bytep rp, dp, pp, lp;
png_uint_32 i;
png_size_t sum = 0;
unsigned int v;
png_ptr->try_row[0] = PNG_FILTER_VALUE_AVG;
for (i = 0, rp = png_ptr->row_buf + 1, dp = png_ptr->try_row + 1,
pp = png_ptr->prev_row + 1; i < bpp; i++)
{
v = *dp++ = (png_byte)(((int)*rp++ - ((int)*pp++ / 2)) & 0xff);
#ifdef PNG_USE_ABS
sum += 128 - abs((int)v - 128);
#else
sum += (v < 128) ? v : 256 - v;
#endif
}
for (lp = png_ptr->row_buf + 1; i < row_bytes; i++)
{
v = *dp++ = (png_byte)(((int)*rp++ - (((int)*pp++ + (int)*lp++) / 2))
& 0xff);
#ifdef PNG_USE_ABS
sum += 128 - abs((int)v - 128);
#else
sum += (v < 128) ? v : 256 - v;
#endif
if (sum > lmins) /* We are already worse, don't continue. */
break;
}
return (sum);
}
static void /* PRIVATE */
png_setup_avg_row_only(png_structrp png_ptr, const png_uint_32 bpp,
const png_size_t row_bytes)
{
png_bytep rp, dp, pp, lp;
png_uint_32 i;
png_ptr->try_row[0] = PNG_FILTER_VALUE_AVG;
for (i = 0, rp = png_ptr->row_buf + 1, dp = png_ptr->try_row + 1,
pp = png_ptr->prev_row + 1; i < bpp; i++)
{
*dp++ = (png_byte)(((int)*rp++ - ((int)*pp++ / 2)) & 0xff);
}
for (lp = png_ptr->row_buf + 1; i < row_bytes; i++)
{
*dp++ = (png_byte)(((int)*rp++ - (((int)*pp++ + (int)*lp++) / 2))
& 0xff);
}
}
static png_size_t /* PRIVATE */
png_setup_paeth_row(png_structrp png_ptr, const png_uint_32 bpp,
const png_size_t row_bytes, const png_size_t lmins)
{
png_bytep rp, dp, pp, cp, lp;
png_size_t i;
png_size_t sum = 0;
unsigned int v;
png_ptr->try_row[0] = PNG_FILTER_VALUE_PAETH;
for (i = 0, rp = png_ptr->row_buf + 1, dp = png_ptr->try_row + 1,
pp = png_ptr->prev_row + 1; i < bpp; i++)
{
v = *dp++ = (png_byte)(((int)*rp++ - (int)*pp++) & 0xff);
#ifdef PNG_USE_ABS
sum += 128 - abs((int)v - 128);
#else
sum += (v < 128) ? v : 256 - v;
#endif
}
for (lp = png_ptr->row_buf + 1, cp = png_ptr->prev_row + 1; i < row_bytes;
i++)
{
int a, b, c, pa, pb, pc, p;
b = *pp++;
c = *cp++;
a = *lp++;
p = b - c;
pc = a - c;
#ifdef PNG_USE_ABS
pa = abs(p);
pb = abs(pc);
pc = abs(p + pc);
#else
pa = p < 0 ? -p : p;
pb = pc < 0 ? -pc : pc;
pc = (p + pc) < 0 ? -(p + pc) : p + pc;
#endif
      p = (pa <= pb && pa <= pc) ? a : (pb <= pc) ? b : c;
v = *dp++ = (png_byte)(((int)*rp++ - p) & 0xff);
#ifdef PNG_USE_ABS
sum += 128 - abs((int)v - 128);
#else
sum += (v < 128) ? v : 256 - v;
#endif
if (sum > lmins) /* We are already worse, don't continue. */
break;
}
return (sum);
}
static void /* PRIVATE */
png_setup_paeth_row_only(png_structrp png_ptr, const png_uint_32 bpp,
const png_size_t row_bytes)
{
png_bytep rp, dp, pp, cp, lp;
png_size_t i;
png_ptr->try_row[0] = PNG_FILTER_VALUE_PAETH;
for (i = 0, rp = png_ptr->row_buf + 1, dp = png_ptr->try_row + 1,
pp = png_ptr->prev_row + 1; i < bpp; i++)
{
*dp++ = (png_byte)(((int)*rp++ - (int)*pp++) & 0xff);
}
for (lp = png_ptr->row_buf + 1, cp = png_ptr->prev_row + 1; i < row_bytes;
i++)
{
int a, b, c, pa, pb, pc, p;
b = *pp++;
c = *cp++;
a = *lp++;
p = b - c;
pc = a - c;
#ifdef PNG_USE_ABS
pa = abs(p);
pb = abs(pc);
pc = abs(p + pc);
#else
pa = p < 0 ? -p : p;
pb = pc < 0 ? -pc : pc;
pc = (p + pc) < 0 ? -(p + pc) : p + pc;
#endif
      p = (pa <= pb && pa <= pc) ? a : (pb <= pc) ? b : c;
*dp++ = (png_byte)(((int)*rp++ - p) & 0xff);
}
}
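/* Worked example of the Paeth predictor used above: with left a = 50,
 * up b = 60 and upper-left c = 40 the initial estimate is a + b - c = 70,
 * so pa = |b - c| = 20, pb = |a - c| = 10 and pc = |a + b - 2c| = 30;
 * pb is smallest, hence the predictor is b (60) and the filtered byte is
 * (raw - 60) & 0xff.
 */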
#endif /* WRITE_FILTER */
void /* PRIVATE */
png_write_find_filter(png_structrp png_ptr, png_row_infop row_info)
{
#ifndef PNG_WRITE_FILTER_SUPPORTED
png_write_filtered_row(png_ptr, png_ptr->row_buf, row_info->rowbytes+1);
#else
unsigned int filter_to_do = png_ptr->do_filter;
png_bytep row_buf;
png_bytep best_row;
png_uint_32 bpp;
png_size_t mins;
png_size_t row_bytes = row_info->rowbytes;
png_debug(1, "in png_write_find_filter");
/* Find out how many bytes offset each pixel is */
bpp = (row_info->pixel_depth + 7) >> 3;
row_buf = png_ptr->row_buf;
mins = PNG_SIZE_MAX - 256/* so we can detect potential overflow of the
running sum */;
/* The prediction method we use is to find which method provides the
* smallest value when summing the absolute values of the distances
* from zero, using anything >= 128 as negative numbers. This is known
* as the "minimum sum of absolute differences" heuristic. Other
* heuristics are the "weighted minimum sum of absolute differences"
* (experimental and can in theory improve compression), and the "zlib
* predictive" method (not implemented yet), which does test compressions
* of lines using different filter methods, and then chooses the
* (series of) filter(s) that give minimum compressed data size (VERY
* computationally expensive).
*
* GRR 980525: consider also
*
* (1) minimum sum of absolute differences from running average (i.e.,
* keep running sum of non-absolute differences & count of bytes)
* [track dispersion, too? restart average if dispersion too large?]
*
* (1b) minimum sum of absolute differences from sliding average, probably
* with window size <= deflate window (usually 32K)
*
* (2) minimum sum of squared differences from zero or running average
* (i.e., ~ root-mean-square approach)
*/
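   /* In other words each filtered byte v contributes min(v, 256 - v) to a
    * per-filter sum (values >= 128 count as small negative numbers), and the
    * filter with the smallest sum is chosen.  For example the filtered bytes
    * { 1, 255, 130, 3 } cost 1 + 1 + 126 + 3 = 131.
    */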
/* We don't need to test the 'no filter' case if this is the only filter
* that has been chosen, as it doesn't actually do anything to the data.
*/
best_row = png_ptr->row_buf;
if (PNG_SIZE_MAX/128 <= row_bytes)
{
      /* Overflow could occur in the sum-of-absolute-differences calculation
       * below, so just select the lowest set filter bit; ANDing a value with
       * its two's-complement negation (0U - filter_to_do) clears all but the
       * lowest set bit.
       */
      filter_to_do &= 0U-filter_to_do;
}
else if ((filter_to_do & PNG_FILTER_NONE) != 0 &&
filter_to_do != PNG_FILTER_NONE)
{
/* Overflow not possible and multiple filters in the list, including the
* 'none' filter.
*/
png_bytep rp;
png_size_t sum = 0;
png_size_t i;
unsigned int v;
{
for (i = 0, rp = row_buf + 1; i < row_bytes; i++, rp++)
{
v = *rp;
#ifdef PNG_USE_ABS
sum += 128 - abs((int)v - 128);
#else
sum += (v < 128) ? v : 256 - v;
#endif
}
}
mins = sum;
}
/* Sub filter */
if (filter_to_do == PNG_FILTER_SUB)
/* It's the only filter so no testing is needed */
{
png_setup_sub_row_only(png_ptr, bpp, row_bytes);
best_row = png_ptr->try_row;
}
else if ((filter_to_do & PNG_FILTER_SUB) != 0)
{
png_size_t sum;
png_size_t lmins = mins;
sum = png_setup_sub_row(png_ptr, bpp, row_bytes, lmins);
if (sum < mins)
{
mins = sum;
best_row = png_ptr->try_row;
if (png_ptr->tst_row != NULL)
{
png_ptr->try_row = png_ptr->tst_row;
png_ptr->tst_row = best_row;
}
}
}
/* Up filter */
if (filter_to_do == PNG_FILTER_UP)
{
png_setup_up_row_only(png_ptr, row_bytes);
best_row = png_ptr->try_row;
}
else if ((filter_to_do & PNG_FILTER_UP) != 0)
{
png_size_t sum;
png_size_t lmins = mins;
sum = png_setup_up_row(png_ptr, row_bytes, lmins);
if (sum < mins)
{
mins = sum;
best_row = png_ptr->try_row;
if (png_ptr->tst_row != NULL)
{
png_ptr->try_row = png_ptr->tst_row;
png_ptr->tst_row = best_row;
}
}
}
/* Avg filter */
if (filter_to_do == PNG_FILTER_AVG)
{
png_setup_avg_row_only(png_ptr, bpp, row_bytes);
best_row = png_ptr->try_row;
}
else if ((filter_to_do & PNG_FILTER_AVG) != 0)
{
png_size_t sum;
png_size_t lmins = mins;
      sum = png_setup_avg_row(png_ptr, bpp, row_bytes, lmins);
if (sum < mins)
{
mins = sum;
best_row = png_ptr->try_row;
if (png_ptr->tst_row != NULL)
{
png_ptr->try_row = png_ptr->tst_row;
png_ptr->tst_row = best_row;
}
}
}
/* Paeth filter */
if (filter_to_do == PNG_FILTER_PAETH)
{
png_setup_paeth_row_only(png_ptr, bpp, row_bytes);
best_row = png_ptr->try_row;
}
else if ((filter_to_do & PNG_FILTER_PAETH) != 0)
{
png_size_t sum;
png_size_t lmins = mins;
sum = png_setup_paeth_row(png_ptr, bpp, row_bytes, lmins);
if (sum < mins)
{
best_row = png_ptr->try_row;
if (png_ptr->tst_row != NULL)
{
png_ptr->try_row = png_ptr->tst_row;
png_ptr->tst_row = best_row;
}
}
}
/* Do the actual writing of the filtered row data from the chosen filter. */
png_write_filtered_row(png_ptr, best_row, row_info->rowbytes+1);
#endif /* WRITE_FILTER */
}
/* Do the actual writing of a previously filtered row. */
static void
png_write_filtered_row(png_structrp png_ptr, png_bytep filtered_row,
png_size_t full_row_length/*includes filter byte*/)
{
png_debug(1, "in png_write_filtered_row");
png_debug1(2, "filter = %d", filtered_row[0]);
png_compress_IDAT(png_ptr, filtered_row, full_row_length, Z_NO_FLUSH);
#ifdef PNG_WRITE_FILTER_SUPPORTED
/* Swap the current and previous rows */
if (png_ptr->prev_row != NULL)
{
png_bytep tptr;
tptr = png_ptr->prev_row;
png_ptr->prev_row = png_ptr->row_buf;
png_ptr->row_buf = tptr;
}
#endif /* WRITE_FILTER */
/* Finish row - updates counters and flushes zlib if last row */
png_write_finish_row(png_ptr);
#ifdef PNG_WRITE_FLUSH_SUPPORTED
png_ptr->flush_rows++;
if (png_ptr->flush_dist > 0 &&
png_ptr->flush_rows >= png_ptr->flush_dist)
{
png_write_flush(png_ptr);
}
#endif /* WRITE_FLUSH */
}
#endif /* WRITE */
| null | null | null | null | 25,558 |
2,585 | null |
train_val
|
04b570817b2b38e35675b17328239746212f4c3f
| 155,642 |
FFmpeg
| 0 |
https://github.com/FFmpeg/FFmpeg
|
2018-06-01 01:23:12+05:30
|
/*
* The simplest mpeg encoder (well, it was the simplest!)
* Copyright (c) 2000,2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/dct.h"
#include "libavcodec/mpegvideo.h"
/* non-permuted inverse zigzag_direct + 1 for the MMX quantizer */
DECLARE_ALIGNED(16, static const uint16_t, inv_zigzag_direct16)[64] = {
1, 2, 6, 7, 15, 16, 28, 29,
3, 5, 8, 14, 17, 27, 30, 43,
4, 9, 13, 18, 26, 31, 42, 44,
10, 12, 19, 25, 32, 41, 45, 54,
11, 20, 24, 33, 40, 46, 53, 55,
21, 23, 34, 39, 47, 52, 56, 61,
22, 35, 38, 48, 51, 57, 60, 62,
36, 37, 49, 50, 58, 59, 63, 64,
};
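/* The blocks below instantiate the quantizer template once per instruction
 * set: each one redefines the COMPILE_TEMPLATE_* feature macros and the
 * RENAME()/RENAME_FDCT() suffixes before re-including
 * mpegvideoenc_template.c, producing the dct_quantize_mmx/_mmxext/_sse2/
 * _ssse3 variants selected in ff_dct_encode_init_x86() at the bottom of
 * this file.
 */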
#if HAVE_6REGS
#if HAVE_MMX_INLINE
#define COMPILE_TEMPLATE_MMXEXT 0
#define COMPILE_TEMPLATE_SSE2 0
#define COMPILE_TEMPLATE_SSSE3 0
#define RENAME(a) a ## _mmx
#define RENAME_FDCT(a) a ## _mmx
#include "mpegvideoenc_template.c"
#endif /* HAVE_MMX_INLINE */
#if HAVE_MMXEXT_INLINE
#undef COMPILE_TEMPLATE_SSSE3
#undef COMPILE_TEMPLATE_SSE2
#undef COMPILE_TEMPLATE_MMXEXT
#define COMPILE_TEMPLATE_MMXEXT 1
#define COMPILE_TEMPLATE_SSE2 0
#define COMPILE_TEMPLATE_SSSE3 0
#undef RENAME
#undef RENAME_FDCT
#define RENAME(a) a ## _mmxext
#define RENAME_FDCT(a) a ## _mmxext
#include "mpegvideoenc_template.c"
#endif /* HAVE_MMXEXT_INLINE */
#if HAVE_SSE2_INLINE
#undef COMPILE_TEMPLATE_MMXEXT
#undef COMPILE_TEMPLATE_SSE2
#undef COMPILE_TEMPLATE_SSSE3
#define COMPILE_TEMPLATE_MMXEXT 0
#define COMPILE_TEMPLATE_SSE2 1
#define COMPILE_TEMPLATE_SSSE3 0
#undef RENAME
#undef RENAME_FDCT
#define RENAME(a) a ## _sse2
#define RENAME_FDCT(a) a ## _sse2
#include "mpegvideoenc_template.c"
#endif /* HAVE_SSE2_INLINE */
#if HAVE_SSSE3_INLINE
#undef COMPILE_TEMPLATE_MMXEXT
#undef COMPILE_TEMPLATE_SSE2
#undef COMPILE_TEMPLATE_SSSE3
#define COMPILE_TEMPLATE_MMXEXT 0
#define COMPILE_TEMPLATE_SSE2 1
#define COMPILE_TEMPLATE_SSSE3 1
#undef RENAME
#undef RENAME_FDCT
#define RENAME(a) a ## _ssse3
#define RENAME_FDCT(a) a ## _sse2
#include "mpegvideoenc_template.c"
#endif /* HAVE_SSSE3_INLINE */
#endif /* HAVE_6REGS */
#if HAVE_INLINE_ASM
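/* A scalar sketch of what both inline-asm denoisers below compute for each
 * of the 64 coefficients (not code from this file, just the equivalent
 * per-element update):
 *
 *     int level  = block[i];
 *     int alevel = level < 0 ? -level : level;
 *     sum[i]    += alevel;
 *     alevel    -= offset[i];
 *     if (alevel < 0)
 *         alevel = 0;
 *     block[i]   = level < 0 ? -alevel : alevel;
 *
 * i.e. absolute values are accumulated into the error sum and each
 * coefficient is shrunk towards zero by its per-position offset.
 */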
#if HAVE_MMX_INLINE
static void denoise_dct_mmx(MpegEncContext *s, int16_t *block){
const int intra= s->mb_intra;
int *sum= s->dct_error_sum[intra];
uint16_t *offset= s->dct_offset[intra];
s->dct_count[intra]++;
__asm__ volatile(
"pxor %%mm7, %%mm7 \n\t"
"1: \n\t"
"pxor %%mm0, %%mm0 \n\t"
"pxor %%mm1, %%mm1 \n\t"
"movq (%0), %%mm2 \n\t"
"movq 8(%0), %%mm3 \n\t"
"pcmpgtw %%mm2, %%mm0 \n\t"
"pcmpgtw %%mm3, %%mm1 \n\t"
"pxor %%mm0, %%mm2 \n\t"
"pxor %%mm1, %%mm3 \n\t"
"psubw %%mm0, %%mm2 \n\t"
"psubw %%mm1, %%mm3 \n\t"
"movq %%mm2, %%mm4 \n\t"
"movq %%mm3, %%mm5 \n\t"
"psubusw (%2), %%mm2 \n\t"
"psubusw 8(%2), %%mm3 \n\t"
"pxor %%mm0, %%mm2 \n\t"
"pxor %%mm1, %%mm3 \n\t"
"psubw %%mm0, %%mm2 \n\t"
"psubw %%mm1, %%mm3 \n\t"
"movq %%mm2, (%0) \n\t"
"movq %%mm3, 8(%0) \n\t"
"movq %%mm4, %%mm2 \n\t"
"movq %%mm5, %%mm3 \n\t"
"punpcklwd %%mm7, %%mm4 \n\t"
"punpckhwd %%mm7, %%mm2 \n\t"
"punpcklwd %%mm7, %%mm5 \n\t"
"punpckhwd %%mm7, %%mm3 \n\t"
"paddd (%1), %%mm4 \n\t"
"paddd 8(%1), %%mm2 \n\t"
"paddd 16(%1), %%mm5 \n\t"
"paddd 24(%1), %%mm3 \n\t"
"movq %%mm4, (%1) \n\t"
"movq %%mm2, 8(%1) \n\t"
"movq %%mm5, 16(%1) \n\t"
"movq %%mm3, 24(%1) \n\t"
"add $16, %0 \n\t"
"add $32, %1 \n\t"
"add $16, %2 \n\t"
"cmp %3, %0 \n\t"
" jb 1b \n\t"
: "+r" (block), "+r" (sum), "+r" (offset)
: "r"(block+64)
);
}
#endif /* HAVE_MMX_INLINE */
#if HAVE_SSE2_INLINE
static void denoise_dct_sse2(MpegEncContext *s, int16_t *block){
const int intra= s->mb_intra;
int *sum= s->dct_error_sum[intra];
uint16_t *offset= s->dct_offset[intra];
s->dct_count[intra]++;
__asm__ volatile(
"pxor %%xmm7, %%xmm7 \n\t"
"1: \n\t"
"pxor %%xmm0, %%xmm0 \n\t"
"pxor %%xmm1, %%xmm1 \n\t"
"movdqa (%0), %%xmm2 \n\t"
"movdqa 16(%0), %%xmm3 \n\t"
"pcmpgtw %%xmm2, %%xmm0 \n\t"
"pcmpgtw %%xmm3, %%xmm1 \n\t"
"pxor %%xmm0, %%xmm2 \n\t"
"pxor %%xmm1, %%xmm3 \n\t"
"psubw %%xmm0, %%xmm2 \n\t"
"psubw %%xmm1, %%xmm3 \n\t"
"movdqa %%xmm2, %%xmm4 \n\t"
"movdqa %%xmm3, %%xmm5 \n\t"
"psubusw (%2), %%xmm2 \n\t"
"psubusw 16(%2), %%xmm3 \n\t"
"pxor %%xmm0, %%xmm2 \n\t"
"pxor %%xmm1, %%xmm3 \n\t"
"psubw %%xmm0, %%xmm2 \n\t"
"psubw %%xmm1, %%xmm3 \n\t"
"movdqa %%xmm2, (%0) \n\t"
"movdqa %%xmm3, 16(%0) \n\t"
"movdqa %%xmm4, %%xmm6 \n\t"
"movdqa %%xmm5, %%xmm0 \n\t"
"punpcklwd %%xmm7, %%xmm4 \n\t"
"punpckhwd %%xmm7, %%xmm6 \n\t"
"punpcklwd %%xmm7, %%xmm5 \n\t"
"punpckhwd %%xmm7, %%xmm0 \n\t"
"paddd (%1), %%xmm4 \n\t"
"paddd 16(%1), %%xmm6 \n\t"
"paddd 32(%1), %%xmm5 \n\t"
"paddd 48(%1), %%xmm0 \n\t"
"movdqa %%xmm4, (%1) \n\t"
"movdqa %%xmm6, 16(%1) \n\t"
"movdqa %%xmm5, 32(%1) \n\t"
"movdqa %%xmm0, 48(%1) \n\t"
"add $32, %0 \n\t"
"add $64, %1 \n\t"
"add $32, %2 \n\t"
"cmp %3, %0 \n\t"
" jb 1b \n\t"
: "+r" (block), "+r" (sum), "+r" (offset)
: "r"(block+64)
XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7")
);
}
#endif /* HAVE_SSE2_INLINE */
#endif /* HAVE_INLINE_ASM */
av_cold void ff_dct_encode_init_x86(MpegEncContext *s)
{
const int dct_algo = s->avctx->dct_algo;
if (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX) {
#if HAVE_MMX_INLINE
int cpu_flags = av_get_cpu_flags();
if (INLINE_MMX(cpu_flags)) {
#if HAVE_6REGS
s->dct_quantize = dct_quantize_mmx;
#endif
s->denoise_dct = denoise_dct_mmx;
}
#endif
#if HAVE_6REGS && HAVE_MMXEXT_INLINE
if (INLINE_MMXEXT(cpu_flags))
s->dct_quantize = dct_quantize_mmxext;
#endif
#if HAVE_SSE2_INLINE
if (INLINE_SSE2(cpu_flags)) {
#if HAVE_6REGS
s->dct_quantize = dct_quantize_sse2;
#endif
s->denoise_dct = denoise_dct_sse2;
}
#endif
#if HAVE_6REGS && HAVE_SSSE3_INLINE
if (INLINE_SSSE3(cpu_flags))
s->dct_quantize = dct_quantize_ssse3;
#endif
}
}
| null | null | null | null | 71,697 |
1,309 | null |
train_val
|
83ed75feba32e46f736fcce0d96a0445f29b96c2
| 163,153 |
krb5
| 0 |
https://github.com/krb5/krb5
|
2016-01-27 15:43:28-05:00
|
/* @(#)rpc_commondata.c 2.1 88/07/29 4.0 RPCSRC */
/*
* Copyright (c) 2010, Oracle America, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the "Oracle America, Inc." nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <gssrpc/rpc.h>
/*
* This file should only contain common data (global data) that is exported
* by public interfaces.
*
* Actually initialized to prevent creation of common blocks, which
* can be problematic on some architectures.
*/
/* RENAMED: should be _null_auth */
struct opaque_auth gssrpc__null_auth = {0};
#ifdef FD_SETSIZE
fd_set svc_fdset; /* Will be zeroed in data segment */
int gssrpc_svc_fdset_init = 0;
#else
int svc_fds = 0;
#endif /* def FD_SETSIZE */
struct rpc_createerr rpc_createerr = {RPC_SUCCESS};
int svc_maxfd = -1;
| null | null | null | null | 74,461 |
41,546 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 41,546 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/gn/ninja_bundle_data_target_writer.h"
#include <algorithm>
#include <sstream>
#include "testing/gtest/include/gtest/gtest.h"
#include "tools/gn/target.h"
#include "tools/gn/test_with_scope.h"
TEST(NinjaBundleDataTargetWriter, Run) {
Err err;
TestWithScope setup;
Target bundle_data(setup.settings(), Label(SourceDir("//foo/"), "data"));
bundle_data.set_output_type(Target::BUNDLE_DATA);
bundle_data.sources().push_back(SourceFile("//foo/input1.txt"));
bundle_data.sources().push_back(SourceFile("//foo/input2.txt"));
bundle_data.sources().push_back(
SourceFile("//foo/Foo.xcassets/Contents.json"));
bundle_data.sources().push_back(
SourceFile("//foo/Foo.xcassets/foo.imageset/Contents.json"));
bundle_data.sources().push_back(
SourceFile("//foo/Foo.xcassets/foo.imageset/FooIcon-29.png"));
bundle_data.sources().push_back(
SourceFile("//foo/Foo.xcassets/foo.imageset/FooIcon-29@2x.png"));
bundle_data.sources().push_back(
SourceFile("//foo/Foo.xcassets/foo.imageset/FooIcon-29@3x.png"));
bundle_data.action_values().outputs() = SubstitutionList::MakeForTest(
"{{bundle_resources_dir}}/{{source_file_part}}");
bundle_data.SetToolchain(setup.toolchain());
bundle_data.visibility().SetPublic();
ASSERT_TRUE(bundle_data.OnResolved(&err));
std::ostringstream out;
NinjaBundleDataTargetWriter writer(&bundle_data, out);
writer.Run();
const char expected[] =
"build obj/foo/data.stamp: stamp "
"../../foo/input1.txt "
"../../foo/input2.txt "
"../../foo/Foo.xcassets/Contents.json "
"../../foo/Foo.xcassets/foo.imageset/Contents.json "
"../../foo/Foo.xcassets/foo.imageset/FooIcon-29.png "
"../../foo/Foo.xcassets/foo.imageset/FooIcon-29@2x.png "
"../../foo/Foo.xcassets/foo.imageset/FooIcon-29@3x.png\n";
std::string out_str = out.str();
EXPECT_EQ(expected, out_str);
}
| null | null | null | null | 38,409 |
24,059 | null |
train_val
|
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
| 189,054 |
linux
| 0 |
https://github.com/torvalds/linux
|
2017-05-12 08:32:58+10:00
|
/*
* Copyright © 2006-2011 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*/
#include <linux/i2c.h>
#include <drm/drmP.h>
#include "framebuffer.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "gma_display.h"
#include "power.h"
#include "cdv_device.h"
static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
struct drm_crtc *crtc, int target,
int refclk, struct gma_clock_t *best_clock);
#define CDV_LIMIT_SINGLE_LVDS_96 0
#define CDV_LIMIT_SINGLE_LVDS_100 1
#define CDV_LIMIT_DAC_HDMI_27 2
#define CDV_LIMIT_DAC_HDMI_96 3
#define CDV_LIMIT_DP_27 4
#define CDV_LIMIT_DP_100 5
static const struct gma_limit_t cdv_intel_limits[] = {
{ /* CDV_SINGLE_LVDS_96MHz */
.dot = {.min = 20000, .max = 115500},
.vco = {.min = 1800000, .max = 3600000},
.n = {.min = 2, .max = 6},
.m = {.min = 60, .max = 160},
.m1 = {.min = 0, .max = 0},
.m2 = {.min = 58, .max = 158},
.p = {.min = 28, .max = 140},
.p1 = {.min = 2, .max = 10},
.p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
.find_pll = gma_find_best_pll,
},
{ /* CDV_SINGLE_LVDS_100MHz */
.dot = {.min = 20000, .max = 115500},
.vco = {.min = 1800000, .max = 3600000},
.n = {.min = 2, .max = 6},
.m = {.min = 60, .max = 160},
.m1 = {.min = 0, .max = 0},
.m2 = {.min = 58, .max = 158},
.p = {.min = 28, .max = 140},
.p1 = {.min = 2, .max = 10},
		/* The single-channel range is 25-112 MHz, and dual-channel
		 * is 80-224 MHz. Prefer single channel as much as possible.
*/
.p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
.find_pll = gma_find_best_pll,
},
{ /* CDV_DAC_HDMI_27MHz */
.dot = {.min = 20000, .max = 400000},
.vco = {.min = 1809000, .max = 3564000},
.n = {.min = 1, .max = 1},
.m = {.min = 67, .max = 132},
.m1 = {.min = 0, .max = 0},
.m2 = {.min = 65, .max = 130},
.p = {.min = 5, .max = 90},
.p1 = {.min = 1, .max = 9},
.p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
.find_pll = gma_find_best_pll,
},
{ /* CDV_DAC_HDMI_96MHz */
.dot = {.min = 20000, .max = 400000},
.vco = {.min = 1800000, .max = 3600000},
.n = {.min = 2, .max = 6},
.m = {.min = 60, .max = 160},
.m1 = {.min = 0, .max = 0},
.m2 = {.min = 58, .max = 158},
.p = {.min = 5, .max = 100},
.p1 = {.min = 1, .max = 10},
.p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
.find_pll = gma_find_best_pll,
},
{ /* CDV_DP_27MHz */
.dot = {.min = 160000, .max = 272000},
.vco = {.min = 1809000, .max = 3564000},
.n = {.min = 1, .max = 1},
.m = {.min = 67, .max = 132},
.m1 = {.min = 0, .max = 0},
.m2 = {.min = 65, .max = 130},
.p = {.min = 5, .max = 90},
.p1 = {.min = 1, .max = 9},
.p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
.find_pll = cdv_intel_find_dp_pll,
},
{ /* CDV_DP_100MHz */
.dot = {.min = 160000, .max = 272000},
.vco = {.min = 1800000, .max = 3600000},
.n = {.min = 2, .max = 6},
.m = {.min = 60, .max = 164},
.m1 = {.min = 0, .max = 0},
.m2 = {.min = 58, .max = 162},
.p = {.min = 5, .max = 100},
.p1 = {.min = 1, .max = 10},
.p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
.find_pll = cdv_intel_find_dp_pll,
}
};
#define _wait_for(COND, MS, W) ({ \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
int ret__ = 0; \
while (!(COND)) { \
if (time_after(jiffies, timeout__)) { \
ret__ = -ETIMEDOUT; \
break; \
} \
if (W && !in_dbg_master()) \
msleep(W); \
} \
ret__; \
})
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
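/* Usage sketch: wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000) polls the
 * condition roughly once a millisecond and evaluates to 0 as soon as it
 * becomes true, or to -ETIMEDOUT once about 1000 ms have passed without it
 * becoming true.
 */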
int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
{
int ret;
ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
if (ret) {
DRM_ERROR("timeout waiting for SB to idle before read\n");
return ret;
}
REG_WRITE(SB_ADDR, reg);
REG_WRITE(SB_PCKT,
SET_FIELD(SB_OPCODE_READ, SB_OPCODE) |
SET_FIELD(SB_DEST_DPLL, SB_DEST) |
SET_FIELD(0xf, SB_BYTE_ENABLE));
ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
if (ret) {
DRM_ERROR("timeout waiting for SB to idle after read\n");
return ret;
}
*val = REG_READ(SB_DATA);
return 0;
}
int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
{
int ret;
static bool dpio_debug = true;
u32 temp;
if (dpio_debug) {
if (cdv_sb_read(dev, reg, &temp) == 0)
DRM_DEBUG_KMS("0x%08x: 0x%08x (before)\n", reg, temp);
DRM_DEBUG_KMS("0x%08x: 0x%08x\n", reg, val);
}
ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
if (ret) {
DRM_ERROR("timeout waiting for SB to idle before write\n");
return ret;
}
REG_WRITE(SB_ADDR, reg);
REG_WRITE(SB_DATA, val);
REG_WRITE(SB_PCKT,
SET_FIELD(SB_OPCODE_WRITE, SB_OPCODE) |
SET_FIELD(SB_DEST_DPLL, SB_DEST) |
SET_FIELD(0xf, SB_BYTE_ENABLE));
ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
if (ret) {
DRM_ERROR("timeout waiting for SB to idle after write\n");
return ret;
}
if (dpio_debug) {
if (cdv_sb_read(dev, reg, &temp) == 0)
DRM_DEBUG_KMS("0x%08x: 0x%08x (after)\n", reg, temp);
}
return 0;
}
/* Reset the DPIO configuration register. The BIOS does this at every
* mode set.
*/
void cdv_sb_reset(struct drm_device *dev)
{
REG_WRITE(DPIO_CFG, 0);
REG_READ(DPIO_CFG);
REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
}
/* Unlike most Intel display engines, on Cedarview the DPLL registers
* are behind this sideband bus. They must be programmed while the
* DPLL reference clock is on in the DPLL control register, but before
* the DPLL is enabled in the DPLL control register.
*/
static int
cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
struct gma_clock_t *clock, bool is_lvds, u32 ddi_select)
{
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
u32 m, n_vco, p;
int ret = 0;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int ref_sfr = (pipe == 0) ? SB_REF_DPLLA : SB_REF_DPLLB;
u32 ref_value;
u32 lane_reg, lane_value;
cdv_sb_reset(dev);
REG_WRITE(dpll_reg, DPLL_SYNCLOCK_ENABLE | DPLL_VGA_MODE_DIS);
udelay(100);
/* Follow the BIOS and write the REF/SFR Register. Hardcoded value */
ref_value = 0x68A701;
cdv_sb_write(dev, SB_REF_SFR(pipe), ref_value);
/* We don't know what the other fields of these regs are, so
* leave them in place.
*/
/*
* The BIT 14:13 of 0x8010/0x8030 is used to select the ref clk
* for the pipe A/B. Display spec 1.06 has wrong definition.
* Correct definition is like below:
*
* refclka mean use clock from same PLL
*
* if DPLLA sets 01 and DPLLB sets 01, they use clock from their pll
*
* if DPLLA sets 01 and DPLLB sets 02, both use clk from DPLLA
*
*/
ret = cdv_sb_read(dev, ref_sfr, &ref_value);
if (ret)
return ret;
ref_value &= ~(REF_CLK_MASK);
/* use DPLL_A for pipeB on CRT/HDMI */
if (pipe == 1 && !is_lvds && !(ddi_select & DP_MASK)) {
DRM_DEBUG_KMS("use DPLLA for pipe B\n");
ref_value |= REF_CLK_DPLLA;
} else {
DRM_DEBUG_KMS("use their DPLL for pipe A/B\n");
ref_value |= REF_CLK_DPLL;
}
ret = cdv_sb_write(dev, ref_sfr, ref_value);
if (ret)
return ret;
ret = cdv_sb_read(dev, SB_M(pipe), &m);
if (ret)
return ret;
m &= ~SB_M_DIVIDER_MASK;
m |= ((clock->m2) << SB_M_DIVIDER_SHIFT);
ret = cdv_sb_write(dev, SB_M(pipe), m);
if (ret)
return ret;
ret = cdv_sb_read(dev, SB_N_VCO(pipe), &n_vco);
if (ret)
return ret;
/* Follow the BIOS to program the N_DIVIDER REG */
n_vco &= 0xFFFF;
n_vco |= 0x107;
n_vco &= ~(SB_N_VCO_SEL_MASK |
SB_N_DIVIDER_MASK |
SB_N_CB_TUNE_MASK);
n_vco |= ((clock->n) << SB_N_DIVIDER_SHIFT);
if (clock->vco < 2250000) {
n_vco |= (2 << SB_N_CB_TUNE_SHIFT);
n_vco |= (0 << SB_N_VCO_SEL_SHIFT);
} else if (clock->vco < 2750000) {
n_vco |= (1 << SB_N_CB_TUNE_SHIFT);
n_vco |= (1 << SB_N_VCO_SEL_SHIFT);
} else if (clock->vco < 3300000) {
n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
n_vco |= (2 << SB_N_VCO_SEL_SHIFT);
} else {
n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
n_vco |= (3 << SB_N_VCO_SEL_SHIFT);
}
ret = cdv_sb_write(dev, SB_N_VCO(pipe), n_vco);
if (ret)
return ret;
ret = cdv_sb_read(dev, SB_P(pipe), &p);
if (ret)
return ret;
p &= ~(SB_P2_DIVIDER_MASK | SB_P1_DIVIDER_MASK);
p |= SET_FIELD(clock->p1, SB_P1_DIVIDER);
switch (clock->p2) {
case 5:
p |= SET_FIELD(SB_P2_5, SB_P2_DIVIDER);
break;
case 10:
p |= SET_FIELD(SB_P2_10, SB_P2_DIVIDER);
break;
case 14:
p |= SET_FIELD(SB_P2_14, SB_P2_DIVIDER);
break;
case 7:
p |= SET_FIELD(SB_P2_7, SB_P2_DIVIDER);
break;
default:
DRM_ERROR("Bad P2 clock: %d\n", clock->p2);
return -EINVAL;
}
ret = cdv_sb_write(dev, SB_P(pipe), p);
if (ret)
return ret;
if (ddi_select) {
if ((ddi_select & DDI_MASK) == DDI0_SELECT) {
lane_reg = PSB_LANE0;
cdv_sb_read(dev, lane_reg, &lane_value);
lane_value &= ~(LANE_PLL_MASK);
lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
cdv_sb_write(dev, lane_reg, lane_value);
lane_reg = PSB_LANE1;
cdv_sb_read(dev, lane_reg, &lane_value);
lane_value &= ~(LANE_PLL_MASK);
lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
cdv_sb_write(dev, lane_reg, lane_value);
} else {
lane_reg = PSB_LANE2;
cdv_sb_read(dev, lane_reg, &lane_value);
lane_value &= ~(LANE_PLL_MASK);
lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
cdv_sb_write(dev, lane_reg, lane_value);
lane_reg = PSB_LANE3;
cdv_sb_read(dev, lane_reg, &lane_value);
lane_value &= ~(LANE_PLL_MASK);
lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
cdv_sb_write(dev, lane_reg, lane_value);
}
}
return 0;
}
static const struct gma_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
int refclk)
{
const struct gma_limit_t *limit;
if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
		 * Only single-channel LVDS is currently supported on CDV.  If
		 * that turns out to be incorrect, add dual-channel LVDS support.
*/
if (refclk == 96000)
limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
else
limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
} else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
gma_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
if (refclk == 27000)
limit = &cdv_intel_limits[CDV_LIMIT_DP_27];
else
limit = &cdv_intel_limits[CDV_LIMIT_DP_100];
} else {
if (refclk == 27000)
limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
else
limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_96];
}
return limit;
}
/* m1 is reserved as 0 in CDV, n is a ring counter */
static void cdv_intel_clock(int refclk, struct gma_clock_t *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
clock->vco = (refclk * clock->m) / clock->n;
clock->dot = clock->vco / clock->p;
}
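/* Worked example using the CDV_DP_27MHz limits and the fixed divisors from
 * cdv_intel_find_dp_pll() below: refclk = 27000, m2 = 118, n = 1, p1 = 2,
 * p2 = 10 gives m = 120, vco = 27000 * 120 / 1 = 3240000 and
 * dot = 3240000 / 20 = 162000, i.e. a 162 MHz dot clock (values in kHz).
 */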
static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
struct drm_crtc *crtc, int target,
int refclk,
struct gma_clock_t *best_clock)
{
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct gma_clock_t clock;
switch (refclk) {
case 27000:
if (target < 200000) {
clock.p1 = 2;
clock.p2 = 10;
clock.n = 1;
clock.m1 = 0;
clock.m2 = 118;
} else {
clock.p1 = 1;
clock.p2 = 10;
clock.n = 1;
clock.m1 = 0;
clock.m2 = 98;
}
break;
case 100000:
if (target < 200000) {
clock.p1 = 2;
clock.p2 = 10;
clock.n = 5;
clock.m1 = 0;
clock.m2 = 160;
} else {
clock.p1 = 1;
clock.p2 = 10;
clock.n = 5;
clock.m1 = 0;
clock.m2 = 133;
}
break;
default:
return false;
}
gma_crtc->clock_funcs->clock(refclk, &clock);
memcpy(best_clock, &clock, sizeof(struct gma_clock_t));
return true;
}
#define FIFO_PIPEA (1 << 0)
#define FIFO_PIPEB (1 << 1)
static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
{
struct drm_crtc *crtc;
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = NULL;
crtc = dev_priv->pipe_to_crtc_mapping[pipe];
gma_crtc = to_gma_crtc(crtc);
if (crtc->primary->fb == NULL || !gma_crtc->active)
return false;
return true;
}
void cdv_disable_sr(struct drm_device *dev)
{
if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) {
/* Disable self-refresh before adjust WM */
REG_WRITE(FW_BLC_SELF, (REG_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN));
REG_READ(FW_BLC_SELF);
gma_wait_for_vblank(dev);
		/* Cedarview workaround: write the overlay plane, which forces the
		 * hardware to leave the MAX_FIFO state.
*/
REG_WRITE(OV_OVADD, 0/*dev_priv->ovl_offset*/);
REG_READ(OV_OVADD);
gma_wait_for_vblank(dev);
}
}
void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
/* Is only one pipe enabled? */
if (cdv_intel_pipe_enabled(dev, 0) ^ cdv_intel_pipe_enabled(dev, 1)) {
u32 fw;
fw = REG_READ(DSPFW1);
fw &= ~DSP_FIFO_SR_WM_MASK;
fw |= (0x7e << DSP_FIFO_SR_WM_SHIFT);
fw &= ~CURSOR_B_FIFO_WM_MASK;
fw |= (0x4 << CURSOR_B_FIFO_WM_SHIFT);
REG_WRITE(DSPFW1, fw);
fw = REG_READ(DSPFW2);
fw &= ~CURSOR_A_FIFO_WM_MASK;
fw |= (0x6 << CURSOR_A_FIFO_WM_SHIFT);
fw &= ~DSP_PLANE_C_FIFO_WM_MASK;
fw |= (0x8 << DSP_PLANE_C_FIFO_WM_SHIFT);
REG_WRITE(DSPFW2, fw);
REG_WRITE(DSPFW3, 0x36000000);
/* ignore FW4 */
/* Is pipe b lvds ? */
if (gma_crtc->pipe == 1 &&
gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
REG_WRITE(DSPFW5, 0x00040330);
} else {
fw = (3 << DSP_PLANE_B_FIFO_WM1_SHIFT) |
(4 << DSP_PLANE_A_FIFO_WM1_SHIFT) |
(3 << CURSOR_B_FIFO_WM1_SHIFT) |
(4 << CURSOR_FIFO_SR_WM1_SHIFT);
REG_WRITE(DSPFW5, fw);
}
REG_WRITE(DSPFW6, 0x10);
gma_wait_for_vblank(dev);
/* enable self-refresh for single pipe active */
REG_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
REG_READ(FW_BLC_SELF);
gma_wait_for_vblank(dev);
} else {
/* HW team suggested values... */
REG_WRITE(DSPFW1, 0x3f880808);
REG_WRITE(DSPFW2, 0x0b020202);
REG_WRITE(DSPFW3, 0x24000000);
REG_WRITE(DSPFW4, 0x08030202);
REG_WRITE(DSPFW5, 0x01010101);
REG_WRITE(DSPFW6, 0x1d0);
gma_wait_for_vblank(dev);
dev_priv->ops->disable_sr(dev);
}
}
/**
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
static int cdv_intel_panel_fitter_pipe(struct drm_device *dev)
{
u32 pfit_control;
pfit_control = REG_READ(PFIT_CONTROL);
/* See if the panel fitter is in use */
if ((pfit_control & PFIT_ENABLE) == 0)
return -1;
return (pfit_control >> 29) & 0x3;
}
static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk;
struct gma_clock_t clock;
u32 dpll = 0, dspcntr, pipeconf;
bool ok;
bool is_crt = false, is_lvds = false, is_tv = false;
bool is_hdmi = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
const struct gma_limit_t *limit;
u32 ddi_select = 0;
bool is_edp = false;
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct gma_encoder *gma_encoder =
gma_attached_encoder(connector);
if (!connector->encoder
|| connector->encoder->crtc != crtc)
continue;
ddi_select = gma_encoder->ddi_select;
switch (gma_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
case INTEL_OUTPUT_ANALOG:
is_crt = true;
break;
case INTEL_OUTPUT_HDMI:
is_hdmi = true;
break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
is_edp = true;
break;
default:
DRM_ERROR("invalid output type.\n");
return 0;
}
}
if (dev_priv->dplla_96mhz)
/* low-end sku, 96/100 mhz */
refclk = 96000;
else
/* high-end sku, 27/100 mhz */
refclk = 27000;
if (is_dp || is_edp) {
/*
		 * Per the spec, the low-end SKU has only CRT/LVDS, so it does not
		 * need to be considered for DP/eDP.
		 * The high-end SKU uses the 27/100 MHz reference clock for DP/eDP:
		 * 100 MHz when the SSC clock is used, 27 MHz otherwise.  From the
		 * VBIOS code it appears that pipe A chooses 27 MHz for DP/eDP
		 * while pipe B chooses 100 MHz.
*/
if (pipe == 0)
refclk = 27000;
else
refclk = 100000;
}
if (is_lvds && dev_priv->lvds_use_ssc) {
refclk = dev_priv->lvds_ssc_freq * 1000;
DRM_DEBUG_KMS("Use SSC reference clock %d Mhz\n", dev_priv->lvds_ssc_freq);
}
drm_mode_debug_printmodeline(adjusted_mode);
limit = gma_crtc->clock_funcs->limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
&clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d",
adjusted_mode->clock, clock.dot);
return 0;
}
dpll = DPLL_VGA_MODE_DIS;
if (is_tv) {
/* XXX: just matching BIOS for now */
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
dpll |= 3;
}
/* dpll |= PLL_REF_INPUT_DREFCLK; */
if (is_dp || is_edp) {
cdv_intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
REG_WRITE(PIPE_GMCH_DATA_M(pipe), 0);
REG_WRITE(PIPE_GMCH_DATA_N(pipe), 0);
REG_WRITE(PIPE_DP_LINK_M(pipe), 0);
REG_WRITE(PIPE_DP_LINK_N(pipe), 0);
}
dpll |= DPLL_SYNCLOCK_ENABLE;
/* if (is_lvds)
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL; */
/* dpll |= (2 << 11); */
/* setup pipeconf */
pipeconf = REG_READ(map->conf);
pipeconf &= ~(PIPE_BPC_MASK);
if (is_edp) {
switch (dev_priv->edp.bpp) {
case 24:
pipeconf |= PIPE_8BPC;
break;
case 18:
pipeconf |= PIPE_6BPC;
break;
case 30:
pipeconf |= PIPE_10BPC;
break;
default:
pipeconf |= PIPE_8BPC;
break;
}
} else if (is_lvds) {
/* the BPC will be 6 if it is 18-bit LVDS panel */
if ((REG_READ(LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
pipeconf |= PIPE_8BPC;
else
pipeconf |= PIPE_6BPC;
} else
pipeconf |= PIPE_8BPC;
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
if (pipe == 0)
dspcntr |= DISPPLANE_SEL_PIPE_A;
else
dspcntr |= DISPPLANE_SEL_PIPE_B;
dspcntr |= DISPLAY_PLANE_ENABLE;
pipeconf |= PIPEACONF_ENABLE;
REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
REG_READ(map->dpll);
cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds, ddi_select);
udelay(150);
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
*/
if (is_lvds) {
u32 lvds = REG_READ(LVDS);
lvds |=
LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
LVDS_PIPEB_SELECT;
/* Set the B0-B3 data pairs corresponding to
* whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
if (clock.p2 == 7)
lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
else
lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more
* thoroughly into how panels behave in the two modes.
*/
REG_WRITE(LVDS, lvds);
REG_READ(LVDS);
}
dpll |= DPLL_VCO_ENABLE;
/* Disable the panel fitter if it was on our pipe */
if (cdv_intel_panel_fitter_pipe(dev) == pipe)
REG_WRITE(PFIT_CONTROL, 0);
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
REG_WRITE(map->dpll,
(REG_READ(map->dpll) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150); /* 42 usec w/o calibration, 110 with. rounded up. */
if (!(REG_READ(map->dpll) & DPLL_LOCK)) {
dev_err(dev->dev, "Failed to get DPLL lock\n");
return -EBUSY;
}
{
int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
REG_WRITE(map->dpll_md, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
}
REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
REG_WRITE(map->size,
((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
REG_WRITE(map->pos, 0);
REG_WRITE(map->src,
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
REG_WRITE(map->conf, pipeconf);
REG_READ(map->conf);
gma_wait_for_vblank(dev);
REG_WRITE(map->cntr, dspcntr);
/* Flush the plane changes */
{
const struct drm_crtc_helper_funcs *crtc_funcs =
crtc->helper_private;
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
}
gma_wait_for_vblank(dev);
return 0;
}
/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
/* FIXME: why are we using this, should it be cdv_ in this tree ? */
static void i8xx_clock(int refclk, struct gma_clock_t *clock)
{
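	/* Effective divider chain: dot = (refclk * m / (n + 2)) / (p1 * p2),
	 * where m = 5 * (m1 + 2) + (m2 + 2).
	 */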
clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
clock->p = clock->p1 * clock->p2;
clock->vco = refclk * clock->m / (clock->n + 2);
clock->dot = clock->vco / clock->p;
}
/* Returns the clock of the currently programmed mode of the given pipe. */
static int cdv_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 dpll;
u32 fp;
struct gma_clock_t clock;
bool is_lvds;
struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
if (gma_power_begin(dev, false)) {
dpll = REG_READ(map->dpll);
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
fp = REG_READ(map->fp0);
else
fp = REG_READ(map->fp1);
is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
gma_power_end(dev);
} else {
dpll = p->dpll;
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
fp = p->fp0;
else
fp = p->fp1;
is_lvds = (pipe == 1) &&
(dev_priv->regs.psb.saveLVDS & LVDS_PORT_EN);
}
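	/* Decode the PLL divisors from the FP and DPLL register values. */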
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
if (is_lvds) {
clock.p1 =
ffs((dpll &
DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
if (clock.p1 == 0) {
clock.p1 = 4;
dev_err(dev->dev, "PLL %d\n", dpll);
}
clock.p2 = 14;
if ((dpll & PLL_REF_INPUT_MASK) ==
PLLB_REF_INPUT_SPREADSPECTRUMIN) {
/* XXX: might not be 66MHz */
i8xx_clock(66000, &clock);
} else
i8xx_clock(48000, &clock);
} else {
if (dpll & PLL_P1_DIVIDE_BY_TWO)
clock.p1 = 2;
else {
clock.p1 =
((dpll &
DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
}
if (dpll & PLL_P2_DIVIDE_BY_4)
clock.p2 = 4;
else
clock.p2 = 2;
i8xx_clock(48000, &clock);
}
/* XXX: It would be nice to validate the clocks, but we can't reuse
* i830PllIsValid() because it relies on the xf86_config connector
* configuration being accurate, which it isn't necessarily.
*/
return clock.dot;
}
/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
const struct psb_offset *map = &dev_priv->regmap[pipe];
struct drm_display_mode *mode;
int htot;
int hsync;
int vtot;
int vsync;
if (gma_power_begin(dev, false)) {
htot = REG_READ(map->htotal);
hsync = REG_READ(map->hsync);
vtot = REG_READ(map->vtotal);
vsync = REG_READ(map->vsync);
gma_power_end(dev);
} else {
htot = p->htotal;
hsync = p->hsync;
vtot = p->vtotal;
vsync = p->vsync;
}
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
return NULL;
mode->clock = cdv_intel_crtc_clock_get(dev, crtc);
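	/* The timing registers hold (value - 1): the low 16 bits carry the
	 * active/start values, the high 16 bits the total/end values.
	 */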
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
mode->vdisplay = (vtot & 0xffff) + 1;
mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
mode->vsync_start = (vsync & 0xffff) + 1;
mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
drm_mode_set_name(mode);
drm_mode_set_crtcinfo(mode, 0);
return mode;
}
const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
.dpms = gma_crtc_dpms,
.mode_set = cdv_intel_crtc_mode_set,
.mode_set_base = gma_pipe_set_base,
.prepare = gma_crtc_prepare,
.commit = gma_crtc_commit,
.disable = gma_crtc_disable,
};
const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
.cursor_set = gma_crtc_cursor_set,
.cursor_move = gma_crtc_cursor_move,
.gamma_set = gma_crtc_gamma_set,
.set_config = gma_crtc_set_config,
.destroy = gma_crtc_destroy,
};
const struct gma_clock_funcs cdv_clock_funcs = {
.clock = cdv_intel_clock,
.limit = cdv_intel_limit,
.pll_is_valid = gma_pll_is_valid,
};
| null | null | null | null | 97,401 |
22,181 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 22,181 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/browser/site_per_process_browsertest.h"
#include <tuple>
#include "base/command_line.h"
#include "base/feature_list.h"
#include "base/json/json_reader.h"
#include "base/test/bind_test_util.h"
#include "base/test/scoped_feature_list.h"
#include "base/test/test_timeouts.h"
#include "build/build_config.h"
#include "components/viz/common/features.h"
#include "content/browser/renderer_host/cursor_manager.h"
#include "content/browser/renderer_host/input/synthetic_tap_gesture.h"
#include "content/browser/renderer_host/render_widget_host_input_event_router.h"
#include "content/browser/renderer_host/render_widget_host_view_child_frame.h"
#include "content/common/frame_messages.h"
#include "content/common/view_messages.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/common/content_features.h"
#include "content/public/common/content_switches.h"
#include "content/public/common/screen_info.h"
#include "content/public/common/use_zoom_for_dsf_policy.h"
#include "content/public/test/content_browser_test_utils.h"
#include "content/public/test/test_navigation_observer.h"
#include "content/public/test/test_utils.h"
#include "content/shell/common/shell_switches.h"
#include "content/test/mock_overscroll_observer.h"
#include "ui/display/display_switches.h"
#include "ui/events/base_event_utils.h"
#include "ui/events/gesture_detection/gesture_configuration.h"
#if defined(USE_AURA)
#include "content/browser/renderer_host/render_widget_host_view_aura.h"
#include "content/public/browser/overscroll_configuration.h"
#include "content/test/mock_overscroll_controller_delegate_aura.h"
#endif
#if defined(OS_MACOSX)
#include "ui/base/test/scoped_preferred_scroller_style_mac.h"
#endif
#if defined(OS_ANDROID)
#include "content/browser/renderer_host/render_widget_host_view_android.h"
#include "content/test/mock_overscroll_refresh_handler_android.h"
#endif
namespace content {
namespace {
class RenderWidgetHostMouseEventMonitor {
public:
explicit RenderWidgetHostMouseEventMonitor(RenderWidgetHost* host)
: host_(host), event_received_(false) {
mouse_callback_ =
base::Bind(&RenderWidgetHostMouseEventMonitor::MouseEventCallback,
base::Unretained(this));
host_->AddMouseEventCallback(mouse_callback_);
}
~RenderWidgetHostMouseEventMonitor() {
host_->RemoveMouseEventCallback(mouse_callback_);
}
bool EventWasReceived() const { return event_received_; }
void ResetEventReceived() { event_received_ = false; }
const blink::WebMouseEvent& event() const { return event_; }
private:
bool MouseEventCallback(const blink::WebMouseEvent& event) {
event_received_ = true;
event_ = event;
return false;
}
RenderWidgetHost::MouseEventCallback mouse_callback_;
RenderWidgetHost* host_;
bool event_received_;
blink::WebMouseEvent event_;
DISALLOW_COPY_AND_ASSIGN(RenderWidgetHostMouseEventMonitor);
};
class TestInputEventObserver : public RenderWidgetHost::InputEventObserver {
public:
explicit TestInputEventObserver(RenderWidgetHost* host) : host_(host) {
host_->AddInputEventObserver(this);
}
~TestInputEventObserver() override { host_->RemoveInputEventObserver(this); }
bool EventWasReceived() const { return !events_received_.empty(); }
void ResetEventsReceived() { events_received_.clear(); }
blink::WebInputEvent::Type EventType() const {
DCHECK(EventWasReceived());
return events_received_.front();
}
const std::vector<blink::WebInputEvent::Type>& events_received() {
return events_received_;
}
const blink::WebInputEvent& event() const { return *event_; }
void OnInputEvent(const blink::WebInputEvent& event) override {
events_received_.push_back(event.GetType());
event_ = ui::WebInputEventTraits::Clone(event);
};
private:
RenderWidgetHost* host_;
std::vector<blink::WebInputEvent::Type> events_received_;
ui::WebScopedInputEvent event_;
DISALLOW_COPY_AND_ASSIGN(TestInputEventObserver);
};
// |position_in_widget| is in the coord space of |rwhv|.
template <typename PointType>
void SetWebEventPositions(blink::WebPointerProperties* event,
const PointType& position_in_widget,
RenderWidgetHostViewBase* rwhv,
RenderWidgetHostViewBase* rwhv_root) {
event->SetPositionInWidget(gfx::PointF(position_in_widget));
const gfx::PointF position_in_root =
rwhv->TransformPointToRootCoordSpaceF(event->PositionInWidget());
const gfx::PointF point_in_screen =
position_in_root + rwhv_root->GetViewBounds().OffsetFromOrigin();
event->SetPositionInScreen(point_in_screen.x(), point_in_screen.y());
}
// For convenience when setting the position in the space of the root RWHV.
template <typename PointType>
void SetWebEventPositions(blink::WebPointerProperties* event,
const PointType& position_in_widget,
RenderWidgetHostViewBase* rwhv_root) {
DCHECK(!rwhv_root->IsRenderWidgetHostViewChildFrame());
SetWebEventPositions(event, position_in_widget, rwhv_root, rwhv_root);
}
#if defined(USE_AURA)
// |event->location()| is in the coord space of |rwhv|.
void UpdateEventRootLocation(ui::LocatedEvent* event,
RenderWidgetHostViewBase* rwhv,
RenderWidgetHostViewBase* rwhv_root) {
const gfx::Point position_in_root =
rwhv->TransformPointToRootCoordSpace(event->location());
gfx::Point root_location = position_in_root;
aura::Window::ConvertPointToTarget(
rwhv_root->GetNativeView(), rwhv_root->GetNativeView()->GetRootWindow(),
&root_location);
event->set_root_location(root_location);
}
// For convenience when setting the position in the space of the root RWHV.
void UpdateEventRootLocation(ui::LocatedEvent* event,
RenderWidgetHostViewBase* rwhv_root) {
DCHECK(!rwhv_root->IsRenderWidgetHostViewChildFrame());
UpdateEventRootLocation(event, rwhv_root, rwhv_root);
}
#endif // defined(USE_AURA)
void RouteMouseEventAndWaitUntilDispatch(
RenderWidgetHostInputEventRouter* router,
RenderWidgetHostViewBase* root_view,
RenderWidgetHostViewBase* expected_target,
blink::WebMouseEvent* event) {
InputEventAckWaiter waiter(expected_target->GetRenderWidgetHost(),
event->GetType());
router->RouteMouseEvent(root_view, event, ui::LatencyInfo());
waiter.Wait();
}
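// Dispatches a left-button MouseDown at |location| (given in |location_view|
// coordinates), routes it from the root view, and verifies that
// |expected_target| receives it within a couple of DIPs of
// |expected_location|.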
void DispatchMouseEventAndWaitUntilDispatch(
WebContentsImpl* web_contents,
RenderWidgetHostViewBase* location_view,
const gfx::PointF& location,
RenderWidgetHostViewBase* expected_target,
const gfx::PointF& expected_location) {
auto* router = web_contents->GetInputEventRouter();
RenderWidgetHostMouseEventMonitor monitor(
expected_target->GetRenderWidgetHost());
gfx::PointF root_location =
location_view->TransformPointToRootCoordSpaceF(location);
blink::WebMouseEvent down_event(
blink::WebInputEvent::kMouseDown, blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests());
down_event.button = blink::WebPointerProperties::Button::kLeft;
down_event.click_count = 1;
FrameTreeNode* root = web_contents->GetFrameTree()->root();
auto* root_view = static_cast<RenderWidgetHostViewBase*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
SetWebEventPositions(&down_event, root_location, root_view);
RouteMouseEventAndWaitUntilDispatch(router, root_view, expected_target,
&down_event);
EXPECT_TRUE(monitor.EventWasReceived());
EXPECT_NEAR(expected_location.x(), monitor.event().PositionInWidget().x, 2);
EXPECT_NEAR(expected_location.y(), monitor.event().PositionInWidget().y, 2);
}
// Helper function that performs a surface hittest.
void SurfaceHitTestTestHelper(
Shell* shell,
net::test_server::EmbeddedTestServer* embedded_test_server) {
GURL main_url(embedded_test_server->GetURL(
"/frame_tree/page_with_positioned_frame.html"));
EXPECT_TRUE(NavigateToURL(shell, main_url));
auto* web_contents = static_cast<WebContentsImpl*>(shell->web_contents());
// It is safe to obtain the root frame tree node here, as it doesn't change.
FrameTreeNode* root = web_contents->GetFrameTree()->root();
ASSERT_EQ(1U, root->child_count());
FrameTreeNode* child_node = root->child_at(0);
GURL site_url(embedded_test_server->GetURL("baz.com", "/title1.html"));
EXPECT_EQ(site_url, child_node->current_url());
EXPECT_NE(shell->web_contents()->GetSiteInstance(),
child_node->current_frame_host()->GetSiteInstance());
RenderWidgetHostViewBase* rwhv_root = static_cast<RenderWidgetHostViewBase*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
RenderWidgetHostViewBase* rwhv_child = static_cast<RenderWidgetHostViewBase*>(
child_node->current_frame_host()->GetRenderWidgetHost()->GetView());
WaitForChildFrameSurfaceReady(child_node->current_frame_host());
DispatchMouseEventAndWaitUntilDispatch(web_contents, rwhv_child,
gfx::PointF(5, 5), rwhv_child,
gfx::PointF(5, 5));
DispatchMouseEventAndWaitUntilDispatch(
web_contents, rwhv_root, gfx::PointF(2, 2), rwhv_root, gfx::PointF(2, 2));
}
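// Helper function that verifies hit testing when content in the main frame
// overlaps a positioned cross-site iframe: a point under the overlapping
// content targets the root view, while a point outside it targets the child.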
void OverlapSurfaceHitTestHelper(
Shell* shell,
net::test_server::EmbeddedTestServer* embedded_test_server) {
GURL main_url(embedded_test_server->GetURL(
"/frame_tree/page_with_content_overlap_positioned_frame.html"));
EXPECT_TRUE(NavigateToURL(shell, main_url));
auto* web_contents = static_cast<WebContentsImpl*>(shell->web_contents());
// It is safe to obtain the root frame tree node here, as it doesn't change.
FrameTreeNode* root = web_contents->GetFrameTree()->root();
ASSERT_EQ(1U, root->child_count());
FrameTreeNode* child_node = root->child_at(0);
GURL site_url(embedded_test_server->GetURL("baz.com", "/title1.html"));
EXPECT_EQ(site_url, child_node->current_url());
EXPECT_NE(shell->web_contents()->GetSiteInstance(),
child_node->current_frame_host()->GetSiteInstance());
RenderWidgetHostViewBase* rwhv_root = static_cast<RenderWidgetHostViewBase*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
RenderWidgetHostViewBase* rwhv_child = static_cast<RenderWidgetHostViewBase*>(
child_node->current_frame_host()->GetRenderWidgetHost()->GetView());
WaitForChildFrameSurfaceReady(child_node->current_frame_host());
gfx::PointF parent_location = gfx::PointF(5, 5);
parent_location =
rwhv_child->TransformPointToRootCoordSpaceF(parent_location);
DispatchMouseEventAndWaitUntilDispatch(
web_contents, rwhv_child, gfx::PointF(5, 5), rwhv_root, parent_location);
DispatchMouseEventAndWaitUntilDispatch(web_contents, rwhv_child,
gfx::PointF(95, 95), rwhv_child,
gfx::PointF(95, 95));
}
// Helper function that performs a surface hittest in nested frame.
void NestedSurfaceHitTestTestHelper(
Shell* shell,
net::test_server::EmbeddedTestServer* embedded_test_server) {
auto* web_contents = static_cast<WebContentsImpl*>(shell->web_contents());
GURL main_url(embedded_test_server->GetURL(
"/frame_tree/page_with_positioned_nested_frames.html"));
EXPECT_TRUE(NavigateToURL(shell, main_url));
// It is safe to obtain the root frame tree node here, as it doesn't change.
FrameTreeNode* root = web_contents->GetFrameTree()->root();
ASSERT_EQ(1U, root->child_count());
FrameTreeNode* parent_iframe_node = root->child_at(0);
GURL site_url(embedded_test_server->GetURL(
"a.com", "/frame_tree/page_with_positioned_frame.html"));
EXPECT_EQ(site_url, parent_iframe_node->current_url());
EXPECT_NE(shell->web_contents()->GetSiteInstance(),
parent_iframe_node->current_frame_host()->GetSiteInstance());
FrameTreeNode* nested_iframe_node = parent_iframe_node->child_at(0);
GURL nested_site_url(embedded_test_server->GetURL("baz.com", "/title1.html"));
EXPECT_EQ(nested_site_url, nested_iframe_node->current_url());
EXPECT_NE(shell->web_contents()->GetSiteInstance(),
nested_iframe_node->current_frame_host()->GetSiteInstance());
EXPECT_NE(parent_iframe_node->current_frame_host()->GetSiteInstance(),
nested_iframe_node->current_frame_host()->GetSiteInstance());
RenderWidgetHostViewBase* rwhv_nested =
static_cast<RenderWidgetHostViewBase*>(
nested_iframe_node->current_frame_host()
->GetRenderWidgetHost()
->GetView());
WaitForChildFrameSurfaceReady(nested_iframe_node->current_frame_host());
DispatchMouseEventAndWaitUntilDispatch(web_contents, rwhv_nested,
gfx::PointF(10, 10), rwhv_nested,
gfx::PointF(10, 10));
}
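// Helper function that verifies hit testing when main-frame elements are
// drawn over a cross-site iframe via layer squashing: points over those
// elements target the root view, while nearby points inside the iframe
// target the child.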
void HitTestLayerSquashing(
Shell* shell,
net::test_server::EmbeddedTestServer* embedded_test_server) {
GURL main_url(embedded_test_server->GetURL(
"/frame_tree/oopif_hit_test_layer_squashing.html"));
EXPECT_TRUE(NavigateToURL(shell, main_url));
auto* web_contents = static_cast<WebContentsImpl*>(shell->web_contents());
// It is safe to obtain the root frame tree node here, as it doesn't change.
FrameTreeNode* root = web_contents->GetFrameTree()->root();
ASSERT_EQ(1U, root->child_count());
FrameTreeNode* child_node = root->child_at(0);
GURL site_url(embedded_test_server->GetURL("baz.com", "/title1.html"));
EXPECT_EQ(site_url, child_node->current_url());
EXPECT_NE(shell->web_contents()->GetSiteInstance(),
child_node->current_frame_host()->GetSiteInstance());
RenderWidgetHostViewBase* rwhv_root = static_cast<RenderWidgetHostViewBase*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
RenderWidgetHostViewBase* rwhv_child = static_cast<RenderWidgetHostViewBase*>(
child_node->current_frame_host()->GetRenderWidgetHost()->GetView());
WaitForChildFrameSurfaceReady(child_node->current_frame_host());
gfx::Vector2dF child_offset = rwhv_child->GetViewBounds().origin() -
rwhv_root->GetViewBounds().origin();
// Send a mouse-down on #B. The main-frame should receive it.
DispatchMouseEventAndWaitUntilDispatch(web_contents, rwhv_root,
gfx::PointF(195, 11), rwhv_root,
gfx::PointF(195, 11));
// Send another event just below. The child-frame should receive it.
DispatchMouseEventAndWaitUntilDispatch(web_contents, rwhv_root,
gfx::PointF(195, 30), rwhv_child,
gfx::PointF(195, 30) - child_offset);
// Send a mouse-down on #C.
DispatchMouseEventAndWaitUntilDispatch(web_contents, rwhv_root,
gfx::PointF(35, 195), rwhv_root,
gfx::PointF(35, 195));
// Send a mouse-down to the right of #C so that it goes to the child frame.
DispatchMouseEventAndWaitUntilDispatch(web_contents, rwhv_root,
gfx::PointF(55, 195), rwhv_child,
gfx::PointF(55, 195) - child_offset);
// Send a mouse-down to the right-bottom edge of the iframe.
DispatchMouseEventAndWaitUntilDispatch(web_contents, rwhv_root,
gfx::PointF(195, 235), rwhv_child,
gfx::PointF(195, 235) - child_offset);
}
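// Helper function that verifies a translucent main-frame element overlaying a
// cross-site iframe captures mouse events until its pointer-events style is
// set to 'none', after which events reach the iframe.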
void HitTestWatermark(
Shell* shell,
net::test_server::EmbeddedTestServer* embedded_test_server) {
GURL main_url(embedded_test_server->GetURL(
"/frame_tree/oopif_hit_test_watermark.html"));
EXPECT_TRUE(NavigateToURL(shell, main_url));
auto* web_contents = static_cast<WebContentsImpl*>(shell->web_contents());
// It is safe to obtain the root frame tree node here, as it doesn't change.
FrameTreeNode* root = web_contents->GetFrameTree()->root();
ASSERT_EQ(1U, root->child_count());
FrameTreeNode* child_node = root->child_at(0);
GURL site_url(embedded_test_server->GetURL("baz.com", "/title1.html"));
EXPECT_EQ(site_url, child_node->current_url());
EXPECT_NE(shell->web_contents()->GetSiteInstance(),
child_node->current_frame_host()->GetSiteInstance());
RenderWidgetHostViewBase* rwhv_root = static_cast<RenderWidgetHostViewBase*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
RenderWidgetHostViewBase* rwhv_child = static_cast<RenderWidgetHostViewBase*>(
child_node->current_frame_host()->GetRenderWidgetHost()->GetView());
WaitForChildFrameSurfaceReady(child_node->current_frame_host());
gfx::Vector2dF child_offset = rwhv_child->GetViewBounds().origin() -
rwhv_root->GetViewBounds().origin();
const gfx::PointF child_location(100, 120);
// Send a mouse-down at the center of the iframe. This should go to the
// main-frame (since there's a translucent div on top of it).
DispatchMouseEventAndWaitUntilDispatch(web_contents, rwhv_child,
child_location, rwhv_root,
child_location + child_offset);
// Set 'pointer-events: none' on the div.
EXPECT_TRUE(ExecuteScript(web_contents, "W.style.pointerEvents = 'none';"));
// Dispatch another event at the same location. It should reach the oopif this
// time.
DispatchMouseEventAndWaitUntilDispatch(
web_contents, rwhv_child, child_location, rwhv_child, child_location);
}
// This helper accounts for Android devices which use page scale factor
// different from 1.0. Coordinate targeting needs to be adjusted before
// hit testing.
double GetPageScaleFactor(Shell* shell) {
return RenderWidgetHostImpl::From(
shell->web_contents()->GetRenderViewHost()->GetWidget())
->last_frame_metadata()
.page_scale_factor;
}
#if defined(USE_AURA)
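// Parses a JSON object string of the form {"x": ..., "y": ...}, as returned
// by the test scripts via domAutomationController, into a gfx::PointF.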
bool ConvertJSONToPoint(const std::string& str, gfx::PointF* point) {
std::unique_ptr<base::Value> value = base::JSONReader::Read(str);
if (!value)
return false;
base::DictionaryValue* root;
if (!value->GetAsDictionary(&root))
return false;
double x, y;
if (!root->GetDouble("x", &x))
return false;
if (!root->GetDouble("y", &y))
return false;
point->set_x(x);
point->set_y(y);
return true;
}
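// Parses a JSON object string with integer "x", "y", "width" and "height"
// fields into a gfx::Rect.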
bool ConvertJSONToRect(const std::string& str, gfx::Rect* rect) {
std::unique_ptr<base::Value> value = base::JSONReader::Read(str);
if (!value)
return false;
base::DictionaryValue* root;
if (!value->GetAsDictionary(&root))
return false;
int x, y, width, height;
if (!root->GetInteger("x", &x))
return false;
if (!root->GetInteger("y", &y))
return false;
if (!root->GetInteger("width", &width))
return false;
if (!root->GetInteger("height", &height))
return false;
rect->set_x(x);
rect->set_y(y);
rect->set_width(width);
rect->set_height(height);
return true;
}
#endif // defined(USE_AURA)
} // namespace
class SitePerProcessHitTestBrowserTest
: public testing::WithParamInterface<std::tuple<int, float>>,
public SitePerProcessBrowserTest {
public:
SitePerProcessHitTestBrowserTest() {}
protected:
void SetUpCommandLine(base::CommandLine* command_line) override {
SitePerProcessBrowserTest::SetUpCommandLine(command_line);
if (std::get<0>(GetParam()) == 1) {
feature_list_.InitAndEnableFeature(features::kEnableVizHitTestDrawQuad);
} else if (std::get<0>(GetParam()) == 2) {
feature_list_.InitAndEnableFeature(
features::kEnableVizHitTestSurfaceLayer);
}
}
base::test::ScopedFeatureList feature_list_;
};
//
// SitePerProcessHighDPIHitTestBrowserTest
//
class SitePerProcessHighDPIHitTestBrowserTest
: public SitePerProcessHitTestBrowserTest {
public:
const double kDeviceScaleFactor = 2.0;
SitePerProcessHighDPIHitTestBrowserTest() {}
protected:
void SetUpCommandLine(base::CommandLine* command_line) override {
SitePerProcessHitTestBrowserTest::SetUpCommandLine(command_line);
command_line->AppendSwitchASCII(
switches::kForceDeviceScaleFactor,
base::StringPrintf("%f", kDeviceScaleFactor));
}
};
//
// SitePerProcessNonIntegerScaleFactorHitTestBrowserTest
//
class SitePerProcessNonIntegerScaleFactorHitTestBrowserTest
: public SitePerProcessHitTestBrowserTest {
public:
const double kDeviceScaleFactor = 1.5;
SitePerProcessNonIntegerScaleFactorHitTestBrowserTest() {}
protected:
void SetUpCommandLine(base::CommandLine* command_line) override {
SitePerProcessHitTestBrowserTest::SetUpCommandLine(command_line);
command_line->AppendSwitchASCII(
switches::kForceDeviceScaleFactor,
base::StringPrintf("%f", kDeviceScaleFactor));
}
};
// Restrict to Aura so we can use a routable MouseWheel event via
// RenderWidgetHostViewAura::OnScrollEvent().
#if defined(USE_AURA)
class SitePerProcessInternalsHitTestBrowserTest
: public SitePerProcessHitTestBrowserTest {
public:
SitePerProcessInternalsHitTestBrowserTest() {}
protected:
void SetUpCommandLine(base::CommandLine* command_line) override {
SitePerProcessHitTestBrowserTest::SetUpCommandLine(command_line);
command_line->AppendSwitch(switches::kExposeInternalsForTesting);
// Needed to guarantee the scrollable div we're testing with is not given
// its own compositing layer.
command_line->AppendSwitch(switches::kDisablePreferCompositingToLCDText);
command_line->AppendSwitchASCII(
switches::kForceDeviceScaleFactor,
base::StringPrintf("%f", std::get<1>(GetParam())));
}
};
IN_PROC_BROWSER_TEST_P(SitePerProcessInternalsHitTestBrowserTest,
ScrollNestedLocalNonFastScrollableDiv) {
GURL main_url(embedded_test_server()->GetURL(
"a.com", "/cross_site_iframe_factory.html?a(b)"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
// It is safe to obtain the root frame tree node here, as it doesn't change.
FrameTreeNode* root = static_cast<WebContentsImpl*>(shell()->web_contents())
->GetFrameTree()
->root();
ASSERT_EQ(1U, root->child_count());
FrameTreeNode* parent_iframe_node = root->child_at(0);
GURL site_url(embedded_test_server()->GetURL(
"b.com", "/tall_page_with_local_iframe.html"));
NavigateFrameToURL(parent_iframe_node, site_url);
FrameTreeNode* nested_iframe_node = parent_iframe_node->child_at(0);
WaitForChildFrameSurfaceReady(nested_iframe_node->current_frame_host());
EXPECT_EQ(
" Site A ------------ proxies for B\n"
" +--Site B ------- proxies for A\n"
" +--Site B -- proxies for A\n"
"Where A = http://a.com/\n"
" B = http://b.com/",
DepictFrameTree(root));
const char* get_element_location_script_fmt =
"var rect = "
"document.getElementById('%s').getBoundingClientRect();\n"
"var point = {\n"
" x: rect.left,\n"
" y: rect.top\n"
"};\n"
"window.domAutomationController.send(JSON.stringify(point));";
// Since the nested local b-frame shares the RenderWidgetHostViewChildFrame
// with the parent frame, we need to query element offsets in both documents
// before converting to root space coordinates for the wheel event.
std::string str;
EXPECT_TRUE(ExecuteScriptAndExtractString(
nested_iframe_node->current_frame_host(),
base::StringPrintf(get_element_location_script_fmt, "scrollable_div"),
&str));
gfx::PointF nested_point_f;
ConvertJSONToPoint(str, &nested_point_f);
EXPECT_TRUE(ExecuteScriptAndExtractString(
parent_iframe_node->current_frame_host(),
base::StringPrintf(get_element_location_script_fmt, "nested_frame"),
&str));
gfx::PointF parent_offset_f;
ConvertJSONToPoint(str, &parent_offset_f);
// Compute location for wheel event.
gfx::PointF point_f(parent_offset_f.x() + nested_point_f.x() + 5.f,
parent_offset_f.y() + nested_point_f.y() + 5.f);
RenderWidgetHostViewChildFrame* rwhv_nested =
static_cast<RenderWidgetHostViewChildFrame*>(
nested_iframe_node->current_frame_host()
->GetRenderWidgetHost()
->GetView());
point_f = rwhv_nested->TransformPointToRootCoordSpaceF(point_f);
RenderWidgetHostViewAura* rwhv_root = static_cast<RenderWidgetHostViewAura*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
gfx::PointF nested_in_parent;
rwhv_root->TransformPointToCoordSpaceForView(
point_f,
parent_iframe_node->current_frame_host()
->GetRenderWidgetHost()
->GetView(),
&nested_in_parent);
// Get original scroll position.
double div_scroll_top_start;
EXPECT_TRUE(ExecuteScriptAndExtractDouble(
nested_iframe_node->current_frame_host(),
"window.domAutomationController.send("
"document.getElementById('scrollable_div').scrollTop);",
&div_scroll_top_start));
EXPECT_EQ(0.0, div_scroll_top_start);
  // Wait until the renderer's compositor thread is synced. Otherwise the
  // non-fast scrollable regions won't be set when the event arrives.
MainThreadFrameObserver observer(rwhv_nested->GetRenderWidgetHost());
observer.Wait();
// Send a wheel to scroll the div.
gfx::Point location(point_f.x(), point_f.y());
ui::ScrollEvent scroll_event(ui::ET_SCROLL, location, ui::EventTimeForNow(),
0, 0, -ui::MouseWheelEvent::kWheelDelta, 0,
ui::MouseWheelEvent::kWheelDelta,
2); // This must be '2' or it gets silently
// dropped.
UpdateEventRootLocation(&scroll_event, rwhv_root);
InputEventAckWaiter ack_observer(
parent_iframe_node->current_frame_host()->GetRenderWidgetHost(),
blink::WebInputEvent::kGestureScrollUpdate);
rwhv_root->OnScrollEvent(&scroll_event);
ack_observer.Wait();
// Check compositor layers.
EXPECT_TRUE(ExecuteScriptAndExtractString(
nested_iframe_node->current_frame_host(),
"window.domAutomationController.send("
"window.internals.layerTreeAsText(document));",
&str));
// We expect the nested OOPIF to not have any compositor layers.
EXPECT_EQ(std::string(), str);
// Verify the div scrolled.
double div_scroll_top = div_scroll_top_start;
EXPECT_TRUE(ExecuteScriptAndExtractDouble(
nested_iframe_node->current_frame_host(),
"window.domAutomationController.send("
"document.getElementById('scrollable_div').scrollTop);",
&div_scroll_top));
EXPECT_NE(div_scroll_top_start, div_scroll_top);
}
IN_PROC_BROWSER_TEST_P(SitePerProcessInternalsHitTestBrowserTest,
NestedLocalNonFastScrollableDivCoordsAreLocal) {
GURL main_url(embedded_test_server()->GetURL(
"a.com", "/cross_site_iframe_factory.html?a(b)"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
// It is safe to obtain the root frame tree node here, as it doesn't change.
FrameTreeNode* root = static_cast<WebContentsImpl*>(shell()->web_contents())
->GetFrameTree()
->root();
ASSERT_EQ(1U, root->child_count());
FrameTreeNode* parent_iframe_node = root->child_at(0);
GURL site_url(embedded_test_server()->GetURL(
"b.com", "/tall_page_with_local_iframe.html"));
NavigateFrameToURL(parent_iframe_node, site_url);
FrameTreeNode* nested_iframe_node = parent_iframe_node->child_at(0);
WaitForChildFrameSurfaceReady(nested_iframe_node->current_frame_host());
EXPECT_EQ(
" Site A ------------ proxies for B\n"
" +--Site B ------- proxies for A\n"
" +--Site B -- proxies for A\n"
"Where A = http://a.com/\n"
" B = http://b.com/",
DepictFrameTree(root));
const char* get_element_location_script_fmt =
"var rect = "
"document.getElementById('%s').getBoundingClientRect();\n"
"var point = {\n"
" x: rect.left,\n"
" y: rect.top\n"
"};\n"
"window.domAutomationController.send(JSON.stringify(point));";
// Since the nested local b-frame shares the RenderWidgetHostViewChildFrame
// with the parent frame, we need to query element offsets in both documents
// before converting to root space coordinates for the wheel event.
std::string str;
EXPECT_TRUE(ExecuteScriptAndExtractString(
nested_iframe_node->current_frame_host(),
base::StringPrintf(get_element_location_script_fmt, "scrollable_div"),
&str));
gfx::PointF nested_point_f;
ConvertJSONToPoint(str, &nested_point_f);
int num_non_fast_region_rects;
EXPECT_TRUE(ExecuteScriptAndExtractInt(
parent_iframe_node->current_frame_host(),
"window.internals.markGestureScrollRegionDirty(document);\n"
"window.internals.forceCompositingUpdate(document);\n"
"var rects = window.internals.nonFastScrollableRects(document);\n"
"window.domAutomationController.send(rects.length);",
&num_non_fast_region_rects));
EXPECT_EQ(1, num_non_fast_region_rects);
EXPECT_TRUE(ExecuteScriptAndExtractString(
parent_iframe_node->current_frame_host(),
"var rect = {\n"
" x: rects[0].left,\n"
" y: rects[0].top,\n"
" width: rects[0].width,\n"
" height: rects[0].height\n"
"};\n"
"window.domAutomationController.send(JSON.stringify(rect));",
&str));
gfx::Rect non_fast_scrollable_rect_before_scroll;
ConvertJSONToRect(str, &non_fast_scrollable_rect_before_scroll);
EXPECT_TRUE(ExecuteScriptAndExtractString(
parent_iframe_node->current_frame_host(),
base::StringPrintf(get_element_location_script_fmt, "nested_frame"),
&str));
gfx::PointF parent_offset_f;
ConvertJSONToPoint(str, &parent_offset_f);
// Compute location for wheel event to scroll the parent with respect to the
// mainframe.
gfx::PointF point_f(parent_offset_f.x() + 1.f, parent_offset_f.y() + 1.f);
RenderWidgetHostViewChildFrame* rwhv_parent =
static_cast<RenderWidgetHostViewChildFrame*>(
parent_iframe_node->current_frame_host()
->GetRenderWidgetHost()
->GetView());
point_f = rwhv_parent->TransformPointToRootCoordSpaceF(point_f);
RenderWidgetHostViewAura* rwhv_root = static_cast<RenderWidgetHostViewAura*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
gfx::PointF nested_in_parent;
rwhv_root->TransformPointToCoordSpaceForView(
point_f,
parent_iframe_node->current_frame_host()
->GetRenderWidgetHost()
->GetView(),
&nested_in_parent);
// Get original scroll position.
double div_scroll_top_start;
EXPECT_TRUE(
ExecuteScriptAndExtractDouble(parent_iframe_node->current_frame_host(),
"window.domAutomationController.send("
"document.body.scrollTop);",
&div_scroll_top_start));
EXPECT_EQ(0.0, div_scroll_top_start);
// Send a wheel to scroll the parent containing the div.
gfx::Point location(point_f.x(), point_f.y());
ui::ScrollEvent scroll_event(ui::ET_SCROLL, location, ui::EventTimeForNow(),
0, 0, -ui::MouseWheelEvent::kWheelDelta, 0,
ui::MouseWheelEvent::kWheelDelta,
2); // This must be '2' or it gets silently
// dropped.
UpdateEventRootLocation(&scroll_event, rwhv_root);
InputEventAckWaiter ack_observer(
parent_iframe_node->current_frame_host()->GetRenderWidgetHost(),
blink::WebInputEvent::kGestureScrollUpdate);
rwhv_root->OnScrollEvent(&scroll_event);
ack_observer.Wait();
MainThreadFrameObserver thread_observer(rwhv_parent->GetRenderWidgetHost());
thread_observer.Wait();
// Check compositor layers.
EXPECT_TRUE(ExecuteScriptAndExtractString(
nested_iframe_node->current_frame_host(),
"window.domAutomationController.send("
"window.internals.layerTreeAsText(document));",
&str));
// We expect the nested OOPIF to not have any compositor layers.
EXPECT_EQ(std::string(), str);
// Verify the div scrolled.
double div_scroll_top = div_scroll_top_start;
EXPECT_TRUE(
ExecuteScriptAndExtractDouble(parent_iframe_node->current_frame_host(),
"window.domAutomationController.send("
"document.body.scrollTop);",
&div_scroll_top));
EXPECT_NE(div_scroll_top_start, div_scroll_top);
// Verify the non-fast scrollable region rect is the same, even though the
// parent scroll isn't.
EXPECT_TRUE(ExecuteScriptAndExtractInt(
parent_iframe_node->current_frame_host(),
"window.internals.markGestureScrollRegionDirty(document);\n"
"window.internals.forceCompositingUpdate(document);\n"
"var rects = window.internals.nonFastScrollableRects(document);\n"
"window.domAutomationController.send(rects.length);",
&num_non_fast_region_rects));
EXPECT_EQ(1, num_non_fast_region_rects);
EXPECT_TRUE(ExecuteScriptAndExtractString(
parent_iframe_node->current_frame_host(),
"var rect = {\n"
" x: rects[0].left,\n"
" y: rects[0].top,\n"
" width: rects[0].width,\n"
" height: rects[0].height\n"
"};\n"
"window.domAutomationController.send(JSON.stringify(rect));",
&str));
gfx::Rect non_fast_scrollable_rect_after_scroll;
ConvertJSONToRect(str, &non_fast_scrollable_rect_after_scroll);
EXPECT_EQ(non_fast_scrollable_rect_before_scroll,
non_fast_scrollable_rect_after_scroll);
}
#endif // defined(USE_AURA)
// Tests that wheel scroll bubbling gets cancelled when the wheel target view
// gets destroyed in the middle of a wheel scroll sequence. This happens in
// cases like overscroll navigation from inside an oopif.
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
CancelWheelScrollBubblingOnWheelTargetDeletion) {
ui::GestureConfiguration::GetInstance()->set_scroll_debounce_interval_in_ms(
0);
GURL main_url(embedded_test_server()->GetURL(
"/frame_tree/page_with_positioned_frame.html"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
// It is safe to obtain the root frame tree node here, as it doesn't change.
FrameTreeNode* root = web_contents()->GetFrameTree()->root();
ASSERT_EQ(1U, root->child_count());
FrameTreeNode* iframe_node = root->child_at(0);
GURL site_url(embedded_test_server()->GetURL("baz.com", "/title1.html"));
EXPECT_EQ(site_url, iframe_node->current_url());
RenderWidgetHostViewBase* root_view = static_cast<RenderWidgetHostViewBase*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
RenderWidgetHostViewBase* child_rwhv = static_cast<RenderWidgetHostViewBase*>(
iframe_node->current_frame_host()->GetRenderWidgetHost()->GetView());
RenderWidgetHostInputEventRouter* router =
static_cast<WebContentsImpl*>(shell()->web_contents())
->GetInputEventRouter();
WaitForChildFrameSurfaceReady(iframe_node->current_frame_host());
InputEventAckWaiter scroll_begin_observer(
root->current_frame_host()->GetRenderWidgetHost(),
blink::WebInputEvent::kGestureScrollBegin);
InputEventAckWaiter scroll_end_observer(
root->current_frame_host()->GetRenderWidgetHost(),
blink::WebInputEvent::kGestureScrollEnd);
// Scroll the iframe upward, scroll events get bubbled up to the root.
blink::WebMouseWheelEvent scroll_event(
blink::WebInputEvent::kMouseWheel, blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests());
gfx::Rect bounds = child_rwhv->GetViewBounds();
float scale_factor = GetPageScaleFactor(shell());
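  // Target a point just inside the child view's bounds, expressed in the root
  // widget's coordinate space and scaled by the page scale factor.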
gfx::Point position_in_widget(
gfx::ToCeiledInt((bounds.x() - root_view->GetViewBounds().x() + 5) *
scale_factor),
gfx::ToCeiledInt((bounds.y() - root_view->GetViewBounds().y() + 5) *
scale_factor));
SetWebEventPositions(&scroll_event, position_in_widget, root_view);
scroll_event.delta_x = 0.0f;
scroll_event.delta_y = 5.0f;
scroll_event.phase = blink::WebMouseWheelEvent::kPhaseBegan;
scroll_event.has_precise_scrolling_deltas = true;
router->RouteMouseWheelEvent(root_view, &scroll_event, ui::LatencyInfo());
scroll_begin_observer.Wait();
// Now destroy the child_rwhv, scroll bubbling stops and a GSE gets sent to
// the root_view.
RenderProcessHost* rph =
iframe_node->current_frame_host()->GetSiteInstance()->GetProcess();
RenderProcessHostWatcher crash_observer(
rph, RenderProcessHostWatcher::WATCH_FOR_PROCESS_EXIT);
EXPECT_TRUE(rph->Shutdown(0));
crash_observer.Wait();
scroll_event.delta_y = 0.0f;
scroll_event.phase = blink::WebMouseWheelEvent::kPhaseEnded;
scroll_event.dispatch_type =
blink::WebInputEvent::DispatchType::kEventNonBlocking;
router->RouteMouseWheelEvent(root_view, &scroll_event, ui::LatencyInfo());
scroll_end_observer.Wait();
}
// When a scroll event is bubbled, ensure that the bubbled event's coordinates
// are correctly updated to the ancestor's coordinate space. In particular,
// ensure that the transformation considers CSS scaling of the child where
// simply applying the ancestor's offset does not produce the correct
// coordinates in the ancestor's coordinate space.
// See https://crbug.com/817392
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
BubbledScrollEventsTransformedCorrectly) {
GURL main_url(embedded_test_server()->GetURL(
"/frame_tree/page_with_positioned_scaled_frame.html"));
ASSERT_TRUE(NavigateToURL(shell(), main_url));
// It is safe to obtain the root frame tree node here, as it doesn't change.
FrameTreeNode* root = web_contents()->GetFrameTree()->root();
ASSERT_EQ(1U, root->child_count());
FrameTreeNode* iframe_node = root->child_at(0);
GURL site_url(embedded_test_server()->GetURL("baz.com", "/title1.html"));
EXPECT_EQ(site_url, iframe_node->current_url());
RenderWidgetHostViewBase* root_rwhv = static_cast<RenderWidgetHostViewBase*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
RenderWidgetHostInputEventRouter* router =
static_cast<WebContentsImpl*>(shell()->web_contents())
->GetInputEventRouter();
WaitForChildFrameSurfaceReady(iframe_node->current_frame_host());
const float scale_factor = GetPageScaleFactor(shell());
// Due to the CSS scaling of the iframe, the position in the child view's
// coordinates is (96, 96) and not (48, 48) (or approximately these values
// if there's rounding due to the scale factor).
const gfx::Point position_in_root(gfx::ToCeiledInt(150 * scale_factor),
gfx::ToCeiledInt(150 * scale_factor));
auto expect_gsb_with_position = base::BindRepeating(
[](const gfx::Point& expected_position, content::InputEventAckSource,
content::InputEventAckState, const blink::WebInputEvent& event) {
if (event.GetType() != blink::WebInputEvent::kGestureScrollBegin)
return false;
const blink::WebGestureEvent& gesture_event =
static_cast<const blink::WebGestureEvent&>(event);
EXPECT_NEAR(expected_position.x(), gesture_event.PositionInWidget().x,
1);
EXPECT_NEAR(expected_position.y(), gesture_event.PositionInWidget().y,
1);
return true;
});
InputEventAckWaiter root_scroll_begin_observer(
root_rwhv->GetRenderWidgetHost(),
base::BindRepeating(expect_gsb_with_position, position_in_root));
// Scroll the iframe upward, scroll events get bubbled up to the root.
blink::WebMouseWheelEvent scroll_event(
blink::WebInputEvent::kMouseWheel, blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests());
SetWebEventPositions(&scroll_event, position_in_root, root_rwhv);
scroll_event.delta_x = 0.0f;
scroll_event.delta_y = 5.0f;
scroll_event.phase = blink::WebMouseWheelEvent::kPhaseBegan;
scroll_event.has_precise_scrolling_deltas = true;
router->RouteMouseWheelEvent(root_rwhv, &scroll_event, ui::LatencyInfo());
root_scroll_begin_observer.Wait();
}
#if defined(USE_AURA) || defined(OS_ANDROID)
// When unconsumed scrolls in a child bubble to the root and start an
// overscroll gesture, the subsequent gesture scroll update events should be
// consumed by the root. The child should not be able to scroll during the
// overscroll gesture.
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
RootConsumesScrollDuringOverscrollGesture) {
GURL main_url(embedded_test_server()->GetURL(
"a.com", "/cross_site_iframe_factory.html?a(b)"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
FrameTreeNode* root = static_cast<WebContentsImpl*>(shell()->web_contents())
->GetFrameTree()
->root();
RenderWidgetHostViewBase* rwhv_root = static_cast<RenderWidgetHostViewBase*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
ASSERT_EQ(1U, root->child_count());
FrameTreeNode* child_node = root->child_at(0);
#if defined(USE_AURA)
// The child must be horizontally scrollable.
GURL child_url(embedded_test_server()->GetURL("b.com", "/wide_page.html"));
#elif defined(OS_ANDROID)
// The child must be vertically scrollable.
GURL child_url(embedded_test_server()->GetURL("b.com", "/tall_page.html"));
#endif
NavigateFrameToURL(child_node, child_url);
EXPECT_EQ(
" Site A ------------ proxies for B\n"
" +--Site B ------- proxies for A\n"
"Where A = http://a.com/\n"
" B = http://b.com/",
DepictFrameTree(root));
RenderWidgetHostViewChildFrame* rwhv_child =
static_cast<RenderWidgetHostViewChildFrame*>(
child_node->current_frame_host()->GetRenderWidgetHost()->GetView());
WaitForChildFrameSurfaceReady(child_node->current_frame_host());
ASSERT_TRUE(rwhv_root->IsScrollOffsetAtTop());
ASSERT_TRUE(rwhv_child->IsScrollOffsetAtTop());
RenderWidgetHostInputEventRouter* router =
static_cast<WebContentsImpl*>(shell()->web_contents())
->GetInputEventRouter();
{
// Set up the RenderWidgetHostInputEventRouter to send the gesture stream
// to the child.
const gfx::Rect root_bounds = rwhv_root->GetViewBounds();
const gfx::Rect child_bounds = rwhv_child->GetViewBounds();
const float page_scale_factor = GetPageScaleFactor(shell());
const gfx::PointF point_in_child(
(child_bounds.x() - root_bounds.x() + 10) * page_scale_factor,
(child_bounds.y() - root_bounds.y() + 10) * page_scale_factor);
gfx::PointF dont_care;
ASSERT_EQ(rwhv_child->GetRenderWidgetHost(),
router->GetRenderWidgetHostAtPoint(rwhv_root, point_in_child,
&dont_care));
blink::WebTouchEvent touch_event(
blink::WebInputEvent::kTouchStart, blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests());
touch_event.touches_length = 1;
touch_event.touches[0].state = blink::WebTouchPoint::kStatePressed;
SetWebEventPositions(&touch_event.touches[0], point_in_child, rwhv_root);
touch_event.unique_touch_event_id = 1;
InputEventAckWaiter waiter(rwhv_child->GetRenderWidgetHost(),
blink::WebInputEvent::kTouchStart);
router->RouteTouchEvent(rwhv_root, &touch_event,
ui::LatencyInfo(ui::SourceEventType::TOUCH));
// With async hit testing, make sure the target for the initial TouchStart
// is resolved before sending the rest of the stream.
waiter.Wait();
blink::WebGestureEvent gesture_event(
blink::WebInputEvent::kGestureTapDown,
blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests(),
blink::kWebGestureDeviceTouchscreen);
gesture_event.unique_touch_event_id = touch_event.unique_touch_event_id;
router->RouteGestureEvent(rwhv_root, &gesture_event,
ui::LatencyInfo(ui::SourceEventType::TOUCH));
}
#if defined(USE_AURA)
RenderWidgetHostViewAura* rwhva =
static_cast<RenderWidgetHostViewAura*>(rwhv_root);
std::unique_ptr<MockOverscrollControllerDelegateAura>
mock_overscroll_delegate =
std::make_unique<MockOverscrollControllerDelegateAura>(rwhva);
rwhva->overscroll_controller()->set_delegate(mock_overscroll_delegate.get());
MockOverscrollObserver* mock_overscroll_observer =
mock_overscroll_delegate.get();
#elif defined(OS_ANDROID)
RenderWidgetHostViewAndroid* rwhv_android =
static_cast<RenderWidgetHostViewAndroid*>(rwhv_root);
std::unique_ptr<MockOverscrollRefreshHandlerAndroid> mock_overscroll_handler =
std::make_unique<MockOverscrollRefreshHandlerAndroid>();
rwhv_android->SetOverscrollControllerForTesting(
mock_overscroll_handler.get());
MockOverscrollObserver* mock_overscroll_observer =
mock_overscroll_handler.get();
#endif // defined(USE_AURA)
InputEventAckWaiter gesture_begin_observer_child(
child_node->current_frame_host()->GetRenderWidgetHost(),
blink::WebInputEvent::kGestureScrollBegin);
InputEventAckWaiter gesture_end_observer_child(
child_node->current_frame_host()->GetRenderWidgetHost(),
blink::WebInputEvent::kGestureScrollEnd);
#if defined(USE_AURA)
const float overscroll_threshold = OverscrollConfig::GetThreshold(
OverscrollConfig::Threshold::kStartTouchscreen);
#elif defined(OS_ANDROID)
const float overscroll_threshold = 0.f;
#endif
// First we need our scroll to initiate an overscroll gesture in the root
// via unconsumed scrolls in the child.
blink::WebGestureEvent gesture_scroll_begin(
blink::WebGestureEvent::kGestureScrollBegin,
blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests(),
blink::kWebGestureDeviceTouchscreen);
gesture_scroll_begin.unique_touch_event_id = 1;
gesture_scroll_begin.data.scroll_begin.delta_hint_units =
blink::WebGestureEvent::ScrollUnits::kPrecisePixels;
gesture_scroll_begin.data.scroll_begin.delta_x_hint = 0.f;
gesture_scroll_begin.data.scroll_begin.delta_y_hint = 0.f;
#if defined(USE_AURA)
// For aura, we scroll horizontally to activate an overscroll navigation.
gesture_scroll_begin.data.scroll_begin.delta_x_hint =
overscroll_threshold + 1;
#elif defined(OS_ANDROID)
// For android, we scroll vertically to activate pull-to-refresh.
gesture_scroll_begin.data.scroll_begin.delta_y_hint =
overscroll_threshold + 1;
#endif
router->RouteGestureEvent(rwhv_root, &gesture_scroll_begin,
ui::LatencyInfo(ui::SourceEventType::TOUCH));
// Make sure the child is indeed receiving the gesture stream.
gesture_begin_observer_child.Wait();
blink::WebGestureEvent gesture_scroll_update(
blink::WebGestureEvent::kGestureScrollUpdate,
blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests(),
blink::kWebGestureDeviceTouchscreen);
gesture_scroll_update.unique_touch_event_id = 1;
gesture_scroll_update.data.scroll_update.delta_units =
blink::WebGestureEvent::ScrollUnits::kPrecisePixels;
gesture_scroll_update.data.scroll_update.delta_x = 0.f;
gesture_scroll_update.data.scroll_update.delta_y = 0.f;
#if defined(USE_AURA)
float* delta = &gesture_scroll_update.data.scroll_update.delta_x;
#elif defined(OS_ANDROID)
float* delta = &gesture_scroll_update.data.scroll_update.delta_y;
#endif
*delta = overscroll_threshold + 1;
mock_overscroll_observer->Reset();
// This will bring us into an overscroll gesture.
router->RouteGestureEvent(rwhv_root, &gesture_scroll_update,
ui::LatencyInfo(ui::SourceEventType::TOUCH));
// Note that in addition to verifying that we get the overscroll update, it
// is necessary to wait before sending the next event to prevent our multiple
// GestureScrollUpdates from being coalesced.
mock_overscroll_observer->WaitForUpdate();
// This scroll is in the same direction and so it will contribute to the
// overscroll.
*delta = 10.0f;
mock_overscroll_observer->Reset();
router->RouteGestureEvent(rwhv_root, &gesture_scroll_update,
ui::LatencyInfo(ui::SourceEventType::TOUCH));
mock_overscroll_observer->WaitForUpdate();
// Now we reverse direction. The child could scroll in this direction, but
// since we're in an overscroll gesture, the root should consume it.
*delta = -5.0f;
mock_overscroll_observer->Reset();
router->RouteGestureEvent(rwhv_root, &gesture_scroll_update,
ui::LatencyInfo(ui::SourceEventType::TOUCH));
mock_overscroll_observer->WaitForUpdate();
blink::WebGestureEvent gesture_scroll_end(
blink::WebGestureEvent::kGestureScrollEnd,
blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests(),
blink::kWebGestureDeviceTouchscreen);
gesture_scroll_end.unique_touch_event_id = 1;
gesture_scroll_end.data.scroll_end.delta_units =
blink::WebGestureEvent::ScrollUnits::kPrecisePixels;
mock_overscroll_observer->Reset();
router->RouteGestureEvent(rwhv_root, &gesture_scroll_end,
ui::LatencyInfo(ui::SourceEventType::TOUCH));
mock_overscroll_observer->WaitForEnd();
// Ensure that the method of providing the child's scroll events to the root
// does not leave the child in an invalid state.
gesture_end_observer_child.Wait();
}
#endif // defined(USE_AURA) || defined(OS_ANDROID)
// Test that an ET_SCROLL event sent to an out-of-process iframe correctly
// results in a scroll. This is only handled by RenderWidgetHostViewAura
// and is needed for trackpad scrolling on Chromebooks.
#if defined(USE_AURA)
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest, ScrollEventToOOPIF) {
GURL main_url(embedded_test_server()->GetURL(
"/frame_tree/page_with_positioned_frame.html"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
// It is safe to obtain the root frame tree node here, as it doesn't change.
FrameTreeNode* root = static_cast<WebContentsImpl*>(shell()->web_contents())
->GetFrameTree()
->root();
ASSERT_EQ(1U, root->child_count());
FrameTreeNode* child_node = root->child_at(0);
GURL site_url(embedded_test_server()->GetURL("baz.com", "/title1.html"));
EXPECT_EQ(site_url, child_node->current_url());
EXPECT_NE(shell()->web_contents()->GetSiteInstance(),
child_node->current_frame_host()->GetSiteInstance());
RenderWidgetHostViewAura* rwhv_parent =
static_cast<RenderWidgetHostViewAura*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
WaitForChildFrameSurfaceReady(child_node->current_frame_host());
// Create listener for input events.
TestInputEventObserver child_frame_monitor(
child_node->current_frame_host()->GetRenderWidgetHost());
// Send a ui::ScrollEvent that will hit test to the child frame.
InputEventAckWaiter waiter(
child_node->current_frame_host()->GetRenderWidgetHost(),
blink::WebInputEvent::kMouseWheel);
ui::ScrollEvent scroll_event(ui::ET_SCROLL, gfx::Point(75, 75),
ui::EventTimeForNow(), ui::EF_NONE, 0,
10, // Offsets
0, 10, // Offset ordinals
2);
UpdateEventRootLocation(&scroll_event, rwhv_parent);
rwhv_parent->OnScrollEvent(&scroll_event);
waiter.Wait();
  // Verify that a mouse wheel event was sent to the child frame renderer.
EXPECT_TRUE(child_frame_monitor.EventWasReceived());
EXPECT_EQ(child_frame_monitor.EventType(), blink::WebInputEvent::kMouseWheel);
}
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
InputEventRouterWheelCoalesceTest) {
GURL main_url(embedded_test_server()->GetURL(
"/frame_tree/page_with_positioned_frame.html"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
// It is safe to obtain the root frame tree node here, as it doesn't change.
FrameTreeNode* root = static_cast<WebContentsImpl*>(shell()->web_contents())
->GetFrameTree()
->root();
ASSERT_EQ(1U, root->child_count());
FrameTreeNode* child_node = root->child_at(0);
GURL site_url(embedded_test_server()->GetURL("baz.com", "/title1.html"));
EXPECT_EQ(site_url, child_node->current_url());
EXPECT_NE(shell()->web_contents()->GetSiteInstance(),
child_node->current_frame_host()->GetSiteInstance());
RenderWidgetHostViewAura* rwhv_parent =
static_cast<RenderWidgetHostViewAura*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
WaitForChildFrameSurfaceReady(child_node->current_frame_host());
RenderWidgetHostInputEventRouter* router =
web_contents()->GetInputEventRouter();
// Create listener for input events.
TestInputEventObserver child_frame_monitor(
child_node->current_frame_host()->GetRenderWidgetHost());
InputEventAckWaiter waiter(
child_node->current_frame_host()->GetRenderWidgetHost(),
blink::WebInputEvent::kMouseWheel);
// Send a mouse wheel event to child.
blink::WebMouseWheelEvent wheel_event(
blink::WebInputEvent::kMouseWheel, blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests());
SetWebEventPositions(&wheel_event, gfx::Point(75, 75), rwhv_parent);
wheel_event.delta_x = 10;
wheel_event.delta_y = 20;
wheel_event.phase = blink::WebMouseWheelEvent::kPhaseBegan;
router->RouteMouseWheelEvent(rwhv_parent, &wheel_event, ui::LatencyInfo());
// Send more mouse wheel events to the child. Since we are waiting for the
// async targeting on the first event, these new mouse wheel events should
// be coalesced properly.
blink::WebMouseWheelEvent wheel_event1(
blink::WebInputEvent::kMouseWheel, blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests());
SetWebEventPositions(&wheel_event1, gfx::Point(70, 70), rwhv_parent);
wheel_event1.delta_x = 12;
wheel_event1.delta_y = 22;
wheel_event1.phase = blink::WebMouseWheelEvent::kPhaseChanged;
router->RouteMouseWheelEvent(rwhv_parent, &wheel_event1, ui::LatencyInfo());
blink::WebMouseWheelEvent wheel_event2(
blink::WebInputEvent::kMouseWheel, blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests());
SetWebEventPositions(&wheel_event2, gfx::Point(65, 65), rwhv_parent);
wheel_event2.delta_x = 14;
wheel_event2.delta_y = 24;
wheel_event2.phase = blink::WebMouseWheelEvent::kPhaseChanged;
router->RouteMouseWheelEvent(rwhv_parent, &wheel_event2, ui::LatencyInfo());
  // Since we are targeting the child, event dispatch should not happen
// synchronously. Validate that the expected target does not receive the
// event immediately.
EXPECT_FALSE(child_frame_monitor.EventWasReceived());
waiter.Wait();
EXPECT_TRUE(child_frame_monitor.EventWasReceived());
EXPECT_EQ(child_frame_monitor.EventType(), blink::WebInputEvent::kMouseWheel);
// Check if the two mouse-wheel update events are coalesced correctly.
const auto& gesture_event =
static_cast<const blink::WebGestureEvent&>(child_frame_monitor.event());
EXPECT_EQ(26 /* wheel_event1.delta_x + wheel_event2.delta_x */,
gesture_event.data.scroll_update.delta_x);
EXPECT_EQ(46 /* wheel_event1.delta_y + wheel_event2.delta_y */,
gesture_event.data.scroll_update.delta_y);
}
#endif // defined(USE_AURA)
// Test that mouse events are being routed to the correct RenderWidgetHostView
// based on coordinates.
#if defined(THREAD_SANITIZER)
// The test times out often on TSAN bot.
// https://crbug.com/591170.
#define MAYBE_SurfaceHitTestTest DISABLED_SurfaceHitTestTest
#else
#define MAYBE_SurfaceHitTestTest SurfaceHitTestTest
#endif
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
MAYBE_SurfaceHitTestTest) {
SurfaceHitTestTestHelper(shell(), embedded_test_server());
}
// Same test as above, but runs in high-dpi mode.
#if defined(OS_ANDROID) || defined(OS_WIN)
// High DPI browser tests are not needed on Android, and they confuse some of
// the coordinate calculations. Android uses a fixed device scale factor.
// Windows is disabled because of https://crbug.com/545547.
#define MAYBE_HighDPISurfaceHitTestTest DISABLED_SurfaceHitTestTest
#else
#define MAYBE_HighDPISurfaceHitTestTest SurfaceHitTestTest
#endif
IN_PROC_BROWSER_TEST_P(SitePerProcessHighDPIHitTestBrowserTest,
MAYBE_HighDPISurfaceHitTestTest) {
SurfaceHitTestTestHelper(shell(), embedded_test_server());
}
// Test that mouse events are being routed to the correct RenderWidgetHostView
// when there are nested out-of-process iframes.
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
NestedSurfaceHitTestTest) {
NestedSurfaceHitTestTestHelper(shell(), embedded_test_server());
}
IN_PROC_BROWSER_TEST_P(SitePerProcessHighDPIHitTestBrowserTest,
NestedSurfaceHitTestTest) {
NestedSurfaceHitTestTestHelper(shell(), embedded_test_server());
}
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
OverlapSurfaceHitTestTest) {
OverlapSurfaceHitTestHelper(shell(), embedded_test_server());
}
IN_PROC_BROWSER_TEST_P(SitePerProcessHighDPIHitTestBrowserTest,
OverlapSurfaceHitTestTest) {
OverlapSurfaceHitTestHelper(shell(), embedded_test_server());
}
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
HitTestLayerSquashing) {
HitTestLayerSquashing(shell(), embedded_test_server());
}
IN_PROC_BROWSER_TEST_P(SitePerProcessHighDPIHitTestBrowserTest,
HitTestLayerSquashing) {
HitTestLayerSquashing(shell(), embedded_test_server());
}
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest, HitTestWatermark) {
HitTestWatermark(shell(), embedded_test_server());
}
IN_PROC_BROWSER_TEST_P(SitePerProcessHighDPIHitTestBrowserTest,
HitTestWatermark) {
HitTestWatermark(shell(), embedded_test_server());
}
// This test verifies that browser-process hit testing ignores frames with
// pointer-events: none.
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
SurfaceHitTestPointerEventsNone) {
GURL main_url(embedded_test_server()->GetURL(
"/frame_tree/page_with_positioned_frame_pointer-events_none.html"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
// It is safe to obtain the root frame tree node here, as it doesn't change.
FrameTreeNode* root = web_contents()->GetFrameTree()->root();
ASSERT_EQ(1U, root->child_count());
FrameTreeNode* child_node = root->child_at(0);
GURL site_url(embedded_test_server()->GetURL("baz.com", "/title1.html"));
EXPECT_EQ(site_url, child_node->current_url());
EXPECT_NE(shell()->web_contents()->GetSiteInstance(),
child_node->current_frame_host()->GetSiteInstance());
// Create listeners for mouse events.
RenderWidgetHostMouseEventMonitor main_frame_monitor(
root->current_frame_host()->GetRenderWidgetHost());
RenderWidgetHostMouseEventMonitor child_frame_monitor(
child_node->current_frame_host()->GetRenderWidgetHost());
RenderWidgetHostInputEventRouter* router =
web_contents()->GetInputEventRouter();
RenderWidgetHostViewBase* root_view = static_cast<RenderWidgetHostViewBase*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
WaitForChildFrameSurfaceReady(child_node->current_frame_host());
// Target input event to child frame.
blink::WebMouseEvent child_event(
blink::WebInputEvent::kMouseDown, blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests());
child_event.button = blink::WebPointerProperties::Button::kLeft;
SetWebEventPositions(&child_event, gfx::Point(75, 75), root_view);
child_event.click_count = 1;
main_frame_monitor.ResetEventReceived();
child_frame_monitor.ResetEventReceived();
InputEventAckWaiter waiter(root->current_frame_host()->GetRenderWidgetHost(),
blink::WebInputEvent::kMouseDown);
router->RouteMouseEvent(root_view, &child_event, ui::LatencyInfo());
waiter.Wait();
EXPECT_TRUE(main_frame_monitor.EventWasReceived());
EXPECT_NEAR(75, main_frame_monitor.event().PositionInWidget().x, 2);
EXPECT_NEAR(75, main_frame_monitor.event().PositionInWidget().y, 2);
EXPECT_FALSE(child_frame_monitor.EventWasReceived());
}
// Verify that an event is properly retargeted to the main frame when an
// asynchronous hit test to the child frame times out.
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
AsynchronousHitTestChildTimeout) {
GURL main_url(embedded_test_server()->GetURL(
"/frame_tree/page_with_positioned_busy_frame.html"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
// It is safe to obtain the root frame tree node here, as it doesn't change.
FrameTreeNode* root = web_contents()->GetFrameTree()->root();
ASSERT_EQ(1U, root->child_count());
FrameTreeNode* child_node = root->child_at(0);
// Create listeners for mouse events.
RenderWidgetHostMouseEventMonitor main_frame_monitor(
root->current_frame_host()->GetRenderWidgetHost());
RenderWidgetHostMouseEventMonitor child_frame_monitor(
child_node->current_frame_host()->GetRenderWidgetHost());
RenderWidgetHostInputEventRouter* router =
web_contents()->GetInputEventRouter();
// Shorten the timeout for purposes of this test.
router->GetRenderWidgetTargeterForTests()
->set_async_hit_test_timeout_delay_for_testing(
TestTimeouts::tiny_timeout());
RenderWidgetHostViewBase* root_view = static_cast<RenderWidgetHostViewBase*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
WaitForChildFrameSurfaceReady(child_node->current_frame_host());
// Target input event to the child frame. It should get delivered to the main
// frame instead because the child frame's main thread is unresponsive.
blink::WebMouseEvent child_event(
blink::WebInputEvent::kMouseDown, blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests());
child_event.button = blink::WebPointerProperties::Button::kLeft;
SetWebEventPositions(&child_event, gfx::Point(75, 75), root_view);
child_event.click_count = 1;
main_frame_monitor.ResetEventReceived();
child_frame_monitor.ResetEventReceived();
RouteMouseEventAndWaitUntilDispatch(router, root_view, root_view,
&child_event);
EXPECT_TRUE(main_frame_monitor.EventWasReceived());
EXPECT_NEAR(75, main_frame_monitor.event().PositionInWidget().x, 2);
EXPECT_NEAR(75, main_frame_monitor.event().PositionInWidget().y, 2);
EXPECT_FALSE(child_frame_monitor.EventWasReceived());
}
// This test verifies that MouseEnter and MouseLeave events fire correctly
// when the mouse cursor moves between processes.
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
CrossProcessMouseEnterAndLeaveTest) {
GURL main_url(embedded_test_server()->GetURL(
"a.com", "/cross_site_iframe_factory.html?a(b,c(d))"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
FrameTreeNode* root = static_cast<WebContentsImpl*>(shell()->web_contents())
->GetFrameTree()
->root();
EXPECT_EQ(
" Site A ------------ proxies for B C D\n"
" |--Site B ------- proxies for A C D\n"
" +--Site C ------- proxies for A B D\n"
" +--Site D -- proxies for A B C\n"
"Where A = http://a.com/\n"
" B = http://b.com/\n"
" C = http://c.com/\n"
" D = http://d.com/",
DepictFrameTree(root));
FrameTreeNode* b_node = root->child_at(0);
FrameTreeNode* c_node = root->child_at(1);
FrameTreeNode* d_node = c_node->child_at(0);
RenderWidgetHostViewBase* rwhv_a = static_cast<RenderWidgetHostViewBase*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
RenderWidgetHostViewBase* rwhv_b = static_cast<RenderWidgetHostViewBase*>(
b_node->current_frame_host()->GetRenderWidgetHost()->GetView());
RenderWidgetHostViewBase* rwhv_d = static_cast<RenderWidgetHostViewBase*>(
d_node->current_frame_host()->GetRenderWidgetHost()->GetView());
// Verifying that the surfaces in B and D are ready is sufficient, since the
// other surfaces contain at least one of them.
WaitForChildFrameSurfaceReady(b_node->current_frame_host());
WaitForChildFrameSurfaceReady(d_node->current_frame_host());
// Create listeners for mouse events. These are used to verify that the
// RenderWidgetHostInputEventRouter is generating MouseLeave, etc. for
// the right renderers.
RenderWidgetHostMouseEventMonitor root_frame_monitor(
root->current_frame_host()->GetRenderWidgetHost());
RenderWidgetHostMouseEventMonitor a_frame_monitor(
root->current_frame_host()->GetRenderWidgetHost());
RenderWidgetHostMouseEventMonitor b_frame_monitor(
b_node->current_frame_host()->GetRenderWidgetHost());
RenderWidgetHostMouseEventMonitor c_frame_monitor(
c_node->current_frame_host()->GetRenderWidgetHost());
RenderWidgetHostMouseEventMonitor d_frame_monitor(
d_node->current_frame_host()->GetRenderWidgetHost());
float scale_factor = GetPageScaleFactor(shell());
// Get the view bounds of the child iframe, which should account for the
// relative offset of its direct parent within the root frame, for use in
// targeting the input event.
gfx::Rect a_bounds = rwhv_a->GetViewBounds();
gfx::Rect b_bounds = rwhv_b->GetViewBounds();
gfx::Rect d_bounds = rwhv_d->GetViewBounds();
gfx::Point point_in_a_frame(2, 2);
gfx::Point point_in_b_frame(
gfx::ToCeiledInt((b_bounds.x() - a_bounds.x() + 25) * scale_factor),
gfx::ToCeiledInt((b_bounds.y() - a_bounds.y() + 25) * scale_factor));
gfx::Point point_in_d_frame(
gfx::ToCeiledInt((d_bounds.x() - a_bounds.x() + 25) * scale_factor),
gfx::ToCeiledInt((d_bounds.y() - a_bounds.y() + 25) * scale_factor));
blink::WebMouseEvent mouse_event(
blink::WebInputEvent::kMouseMove, blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests());
SetWebEventPositions(&mouse_event, point_in_a_frame, rwhv_a);
// Send an initial MouseMove to the root view, which shouldn't affect the
// other renderers.
web_contents()->GetInputEventRouter()->RouteMouseEvent(rwhv_a, &mouse_event,
ui::LatencyInfo());
EXPECT_TRUE(a_frame_monitor.EventWasReceived());
a_frame_monitor.ResetEventReceived();
EXPECT_FALSE(b_frame_monitor.EventWasReceived());
EXPECT_FALSE(c_frame_monitor.EventWasReceived());
EXPECT_FALSE(d_frame_monitor.EventWasReceived());
// Next send a MouseMove to the B frame, which shouldn't affect C or D, but
// A should receive a MouseMove event.
SetWebEventPositions(&mouse_event, point_in_b_frame, rwhv_a);
auto* router = web_contents()->GetInputEventRouter();
RouteMouseEventAndWaitUntilDispatch(router, rwhv_a, rwhv_b, &mouse_event);
EXPECT_TRUE(a_frame_monitor.EventWasReceived());
EXPECT_EQ(a_frame_monitor.event().GetType(),
blink::WebInputEvent::kMouseMove);
a_frame_monitor.ResetEventReceived();
EXPECT_TRUE(b_frame_monitor.EventWasReceived());
b_frame_monitor.ResetEventReceived();
EXPECT_FALSE(c_frame_monitor.EventWasReceived());
EXPECT_FALSE(d_frame_monitor.EventWasReceived());
// Next send a MouseMove to the D frame, which should have side effects in
// every other RenderWidgetHostView.
SetWebEventPositions(&mouse_event, point_in_d_frame, rwhv_a);
RouteMouseEventAndWaitUntilDispatch(router, rwhv_a, rwhv_d, &mouse_event);
EXPECT_TRUE(a_frame_monitor.EventWasReceived());
EXPECT_EQ(a_frame_monitor.event().GetType(),
blink::WebInputEvent::kMouseMove);
EXPECT_TRUE(b_frame_monitor.EventWasReceived());
EXPECT_EQ(b_frame_monitor.event().GetType(),
blink::WebInputEvent::kMouseLeave);
EXPECT_TRUE(c_frame_monitor.EventWasReceived());
EXPECT_EQ(c_frame_monitor.event().GetType(),
blink::WebInputEvent::kMouseMove);
EXPECT_TRUE(d_frame_monitor.EventWasReceived());
}
// Verify that mouse capture works on a RenderWidgetHostView level, so that
// dragging scroll bars and selecting text continues even when the mouse
// cursor crosses over cross-process frame boundaries.
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
CrossProcessMouseCapture) {
GURL main_url(embedded_test_server()->GetURL(
"/frame_tree/page_with_positioned_frame.html"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
// It is safe to obtain the root frame tree node here, as it doesn't change.
FrameTreeNode* root = web_contents()->GetFrameTree()->root();
ASSERT_EQ(1U, root->child_count());
FrameTreeNode* child_node = root->child_at(0);
GURL site_url(embedded_test_server()->GetURL("baz.com", "/title1.html"));
EXPECT_EQ(site_url, child_node->current_url());
EXPECT_NE(shell()->web_contents()->GetSiteInstance(),
child_node->current_frame_host()->GetSiteInstance());
// Create listeners for mouse events.
RenderWidgetHostMouseEventMonitor main_frame_monitor(
root->current_frame_host()->GetRenderWidgetHost());
RenderWidgetHostMouseEventMonitor child_frame_monitor(
child_node->current_frame_host()->GetRenderWidgetHost());
RenderWidgetHostInputEventRouter* router =
web_contents()->GetInputEventRouter();
RenderWidgetHostViewBase* root_view = static_cast<RenderWidgetHostViewBase*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
RenderWidgetHostViewBase* rwhv_child = static_cast<RenderWidgetHostViewBase*>(
child_node->current_frame_host()->GetRenderWidgetHost()->GetView());
WaitForChildFrameSurfaceReady(child_node->current_frame_host());
float scale_factor = GetPageScaleFactor(shell());
// Get the view bounds of the child iframe, which should account for the
// relative offset of its direct parent within the root frame, for use in
// targeting the input event.
gfx::Rect bounds = rwhv_child->GetViewBounds();
int child_frame_target_x = gfx::ToCeiledInt(
(bounds.x() - root_view->GetViewBounds().x() + 5) * scale_factor);
int child_frame_target_y = gfx::ToCeiledInt(
(bounds.y() - root_view->GetViewBounds().y() + 5) * scale_factor);
// Target MouseDown to child frame.
blink::WebMouseEvent mouse_event(
blink::WebInputEvent::kMouseDown, blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests());
mouse_event.button = blink::WebPointerProperties::Button::kLeft;
SetWebEventPositions(&mouse_event,
gfx::Point(child_frame_target_x, child_frame_target_y),
root_view);
mouse_event.click_count = 1;
main_frame_monitor.ResetEventReceived();
child_frame_monitor.ResetEventReceived();
RouteMouseEventAndWaitUntilDispatch(router, root_view, rwhv_child,
&mouse_event);
EXPECT_FALSE(main_frame_monitor.EventWasReceived());
EXPECT_TRUE(child_frame_monitor.EventWasReceived());
// Target MouseMove to main frame. This should still be routed to the
// child frame because it is now capturing mouse input.
mouse_event.SetType(blink::WebInputEvent::kMouseMove);
mouse_event.SetModifiers(blink::WebInputEvent::kLeftButtonDown);
SetWebEventPositions(&mouse_event, gfx::Point(1, 1), root_view);
// Note that this event is sent twice, with the monitors cleared after
// the first time, because the first MouseMove to the child frame
// causes a MouseMove to be sent to the main frame also, which we
// need to ignore.
router->RouteMouseEvent(root_view, &mouse_event, ui::LatencyInfo());
main_frame_monitor.ResetEventReceived();
child_frame_monitor.ResetEventReceived();
SetWebEventPositions(&mouse_event, gfx::Point(1, 5), root_view);
RouteMouseEventAndWaitUntilDispatch(router, root_view, rwhv_child,
&mouse_event);
EXPECT_FALSE(main_frame_monitor.EventWasReceived());
EXPECT_TRUE(child_frame_monitor.EventWasReceived());
// A MouseUp to the child frame should cancel the mouse capture.
mouse_event.SetType(blink::WebInputEvent::kMouseUp);
mouse_event.SetModifiers(blink::WebInputEvent::kNoModifiers);
SetWebEventPositions(&mouse_event,
gfx::Point(child_frame_target_x, child_frame_target_y),
root_view);
main_frame_monitor.ResetEventReceived();
child_frame_monitor.ResetEventReceived();
RouteMouseEventAndWaitUntilDispatch(router, root_view, rwhv_child,
&mouse_event);
EXPECT_FALSE(main_frame_monitor.EventWasReceived());
EXPECT_TRUE(child_frame_monitor.EventWasReceived());
// Subsequent MouseMove events targeted to the main frame should be routed
// to that frame.
mouse_event.SetType(blink::WebInputEvent::kMouseMove);
SetWebEventPositions(&mouse_event, gfx::Point(1, 10), root_view);
// Sending the MouseMove twice for the same reason as above.
router->RouteMouseEvent(root_view, &mouse_event, ui::LatencyInfo());
main_frame_monitor.ResetEventReceived();
child_frame_monitor.ResetEventReceived();
SetWebEventPositions(&mouse_event, gfx::Point(1, 15), root_view);
router->RouteMouseEvent(root_view, &mouse_event, ui::LatencyInfo());
EXPECT_TRUE(main_frame_monitor.EventWasReceived());
EXPECT_FALSE(child_frame_monitor.EventWasReceived());
// Target MouseDown to the main frame to cause it to capture input.
mouse_event.SetType(blink::WebInputEvent::kMouseDown);
SetWebEventPositions(&mouse_event, gfx::Point(1, 20), root_view);
main_frame_monitor.ResetEventReceived();
child_frame_monitor.ResetEventReceived();
router->RouteMouseEvent(root_view, &mouse_event, ui::LatencyInfo());
EXPECT_TRUE(main_frame_monitor.EventWasReceived());
EXPECT_FALSE(child_frame_monitor.EventWasReceived());
// Sending a MouseMove to the child frame should still result in the main
// frame receiving the event.
mouse_event.SetType(blink::WebInputEvent::kMouseMove);
mouse_event.SetModifiers(blink::WebInputEvent::kLeftButtonDown);
SetWebEventPositions(&mouse_event,
gfx::Point(child_frame_target_x, child_frame_target_y),
root_view);
main_frame_monitor.ResetEventReceived();
child_frame_monitor.ResetEventReceived();
router->RouteMouseEvent(root_view, &mouse_event, ui::LatencyInfo());
EXPECT_TRUE(main_frame_monitor.EventWasReceived());
EXPECT_FALSE(child_frame_monitor.EventWasReceived());
}
// There are no cursors on Android.
#if !defined(OS_ANDROID)
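// Observes ViewHostMsg_SetCursor IPC messages so a test can wait for a cursor
// update from the renderer and check which routing id sent it.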
class CursorMessageFilter : public content::BrowserMessageFilter {
public:
CursorMessageFilter()
: content::BrowserMessageFilter(ViewMsgStart),
message_loop_runner_(new content::MessageLoopRunner),
last_set_cursor_routing_id_(MSG_ROUTING_NONE) {}
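// Called on the IO thread. The routing id of any SetCursor message is
// forwarded to the UI thread, and the message is left unhandled so it still
// reaches its real handler.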
bool OnMessageReceived(const IPC::Message& message) override {
if (message.type() == ViewHostMsg_SetCursor::ID) {
content::BrowserThread::PostTask(
content::BrowserThread::UI, FROM_HERE,
base::BindOnce(&CursorMessageFilter::OnSetCursor, this,
message.routing_id()));
}
return false;
}
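// Runs on the UI thread; records which widget requested the cursor change and
// unblocks Wait().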
void OnSetCursor(int routing_id) {
last_set_cursor_routing_id_ = routing_id;
message_loop_runner_->Quit();
}
int last_set_cursor_routing_id() const { return last_set_cursor_routing_id_; }
void Wait() {
// Do not reset the cursor, as the cursor may already have been set (and
// Quit() already called on |message_loop_runner_|).
message_loop_runner_->Run();
}
private:
~CursorMessageFilter() override {}
scoped_refptr<content::MessageLoopRunner> message_loop_runner_;
int last_set_cursor_routing_id_;
DISALLOW_COPY_AND_ASSIGN(CursorMessageFilter);
};
namespace {
// Verify that we receive a mouse cursor update message when we mouse over
// a text field contained in an out-of-process iframe.
void CursorUpdateReceivedFromCrossSiteIframeHelper(
Shell* shell,
net::test_server::EmbeddedTestServer* embedded_test_server) {
GURL main_url(embedded_test_server->GetURL(
"/frame_tree/page_with_positioned_frame.html"));
EXPECT_TRUE(NavigateToURL(shell, main_url));
auto* web_contents = static_cast<WebContentsImpl*>(shell->web_contents());
FrameTreeNode* root = web_contents->GetFrameTree()->root();
FrameTreeNode* child_node = root->child_at(0);
EXPECT_NE(shell->web_contents()->GetSiteInstance(),
child_node->current_frame_host()->GetSiteInstance());
WaitForChildFrameSurfaceReady(child_node->current_frame_host());
scoped_refptr<CursorMessageFilter> filter = new CursorMessageFilter();
child_node->current_frame_host()->GetProcess()->AddFilter(filter.get());
RenderWidgetHostViewBase* root_view = static_cast<RenderWidgetHostViewBase*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
RenderWidgetHost* rwh_child =
root->child_at(0)->current_frame_host()->GetRenderWidgetHost();
RenderWidgetHostViewBase* child_view =
static_cast<RenderWidgetHostViewBase*>(rwh_child->GetView());
// This should only return nullptr on Android.
EXPECT_TRUE(root_view->GetCursorManager());
WebCursor cursor;
EXPECT_FALSE(
root_view->GetCursorManager()->GetCursorForTesting(root_view, cursor));
EXPECT_FALSE(
root_view->GetCursorManager()->GetCursorForTesting(child_view, cursor));
// Send a MouseMove to the subframe. The frame contains text, and moving the
// mouse over it should cause the renderer to send a mouse cursor update.
blink::WebMouseEvent mouse_event(
blink::WebInputEvent::kMouseMove, blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests());
SetWebEventPositions(&mouse_event, gfx::Point(60, 60), root_view);
auto* router = web_contents->GetInputEventRouter();
RenderWidgetHostMouseEventMonitor child_monitor(
child_view->GetRenderWidgetHost());
RenderWidgetHostMouseEventMonitor root_monitor(
root_view->GetRenderWidgetHost());
RouteMouseEventAndWaitUntilDispatch(router, root_view, child_view,
&mouse_event);
// The child_view should receive a mouse-move event.
EXPECT_TRUE(child_monitor.EventWasReceived());
EXPECT_EQ(blink::WebInputEvent::kMouseMove, child_monitor.event().GetType());
EXPECT_NEAR(10, child_monitor.event().PositionInWidget().x, 2);
EXPECT_NEAR(10, child_monitor.event().PositionInWidget().y, 2);
// The root_view should also receive a mouse-move event.
EXPECT_TRUE(root_monitor.EventWasReceived());
EXPECT_EQ(blink::WebInputEvent::kMouseMove, root_monitor.event().GetType());
EXPECT_EQ(60, root_monitor.event().PositionInWidget().x);
EXPECT_EQ(60, root_monitor.event().PositionInWidget().y);
// CursorMessageFilter::Wait() implicitly tests whether we receive a
// ViewHostMsg_SetCursor message from the renderer process, because it does
// not return otherwise.
filter->Wait();
EXPECT_EQ(filter->last_set_cursor_routing_id(), rwh_child->GetRoutingID());
// Yield to ensure that the SetCursor message is processed by its real
// handler.
{
base::RunLoop loop;
base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
loop.QuitClosure());
loop.Run();
}
// The root_view receives a mouse-move event on top of the iframe, which does
// not send a cursor update.
EXPECT_FALSE(
root_view->GetCursorManager()->GetCursorForTesting(root_view, cursor));
EXPECT_TRUE(
root_view->GetCursorManager()->GetCursorForTesting(child_view, cursor));
// Since the mouse moved over a text box, this should not be the default
// cursor.
CursorInfo cursor_info;
cursor.GetCursorInfo(&cursor_info);
EXPECT_EQ(cursor_info.type, blink::WebCursorInfo::kTypeIBeam);
}
} // namespace
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
CursorUpdateReceivedFromCrossSiteIframe) {
CursorUpdateReceivedFromCrossSiteIframeHelper(shell(),
embedded_test_server());
}
IN_PROC_BROWSER_TEST_P(SitePerProcessHighDPIHitTestBrowserTest,
CursorUpdateReceivedFromCrossSiteIframe) {
CursorUpdateReceivedFromCrossSiteIframeHelper(shell(),
embedded_test_server());
}
#endif // !defined(OS_ANDROID)
#if defined(USE_AURA)
// Browser process hit testing is not implemented on Android, and these tests
// require Aura for RenderWidgetHostViewAura::OnTouchEvent().
// https://crbug.com/491334
// Ensure that scroll events can be cancelled with a wheel handler.
// https://crbug.com/698195
class SitePerProcessMouseWheelHitTestBrowserTest
: public SitePerProcessHitTestBrowserTest {
public:
SitePerProcessMouseWheelHitTestBrowserTest() : rwhv_root_(nullptr) {}
void SetupWheelAndScrollHandlers(content::RenderFrameHostImpl* rfh) {
// Set up event handlers. The wheel event handler calls preventDefault() on
// alternate events, so only every other wheel generates a scroll. The fact
// that any scroll events fire at all depends on the event going to the main
// thread, which requires that the nonFastScrollableRegion be set correctly
// on the compositor.
std::string script =
"wheel_count = 0;"
"function wheel_handler(e) {"
" wheel_count++;"
" if (wheel_count % 2 == 0)"
" e.preventDefault();\n"
" domAutomationController.send('wheel: ' + wheel_count);"
"}"
"function scroll_handler(e) {"
" domAutomationController.send('scroll: ' + wheel_count);"
"}"
"scroll_div = document.getElementById('scrollable_div');"
"scroll_div.addEventListener('wheel', wheel_handler);"
"scroll_div.addEventListener('scroll', scroll_handler);"
"document.body.style.background = 'black';";
content::DOMMessageQueue msg_queue;
std::string reply;
EXPECT_TRUE(ExecuteScript(rfh, script));
// Wait until renderer's compositor thread is synced. Otherwise the event
// handler won't be installed when the event arrives.
{
MainThreadFrameObserver observer(rfh->GetRenderWidgetHost());
observer.Wait();
}
}
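// Synthesizes a two-finger ui::ScrollEvent at |location| and hands it to the
// root view via OnScrollEvent().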
void SendMouseWheel(gfx::Point location) {
DCHECK(rwhv_root_);
ui::ScrollEvent scroll_event(ui::ET_SCROLL, location, ui::EventTimeForNow(),
0, 0, -ui::MouseWheelEvent::kWheelDelta, 0,
ui::MouseWheelEvent::kWheelDelta,
2); // This must be '2' or it gets silently
// dropped.
UpdateEventRootLocation(&scroll_event, rwhv_root_);
rwhv_root_->OnScrollEvent(&scroll_event);
}
void set_rwhv_root(RenderWidgetHostViewAura* rwhv_root) {
rwhv_root_ = rwhv_root;
}
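// Sends wheel ticks at |pos| and uses the DOM message queue to verify which
// wheel and scroll handlers fire on |expected_target|.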
void RunTest(gfx::Point pos, RenderWidgetHostViewBase* expected_target) {
content::DOMMessageQueue msg_queue;
std::string reply;
auto* rwhv_root = static_cast<RenderWidgetHostViewAura*>(
web_contents()->GetRenderWidgetHostView());
set_rwhv_root(rwhv_root);
if (rwhv_root->wheel_scroll_latching_enabled()) {
// Set the wheel scroll latching timeout to a large value to make sure
// that the timer doesn't expire for the duration of the test.
rwhv_root->event_handler()->set_mouse_wheel_wheel_phase_handler_timeout(
TestTimeouts::action_max_timeout());
}
InputEventAckWaiter waiter(expected_target->GetRenderWidgetHost(),
blink::WebInputEvent::kMouseWheel);
SendMouseWheel(pos);
waiter.Wait();
// Expect both wheel and scroll handlers to fire.
EXPECT_TRUE(msg_queue.WaitForMessage(&reply));
EXPECT_EQ("\"wheel: 1\"", reply);
EXPECT_TRUE(msg_queue.WaitForMessage(&reply));
EXPECT_EQ("\"scroll: 1\"", reply);
SendMouseWheel(pos);
// If async_wheel_events is disabled, this time only the wheel handler
// fires, since even-numbered scrolls are prevent-defaulted. If it is
// enabled, then this wheel event is sent without blocking and won't be
// cancellable.
EXPECT_TRUE(msg_queue.WaitForMessage(&reply));
EXPECT_EQ("\"wheel: 2\"", reply);
if (base::FeatureList::IsEnabled(features::kAsyncWheelEvents) &&
base::FeatureList::IsEnabled(
features::kTouchpadAndWheelScrollLatching)) {
DCHECK(rwhv_root->wheel_scroll_latching_enabled());
EXPECT_TRUE(msg_queue.WaitForMessage(&reply));
EXPECT_EQ("\"scroll: 2\"", reply);
}
SendMouseWheel(pos);
// Odd number of wheels, expect both wheel and scroll handlers to fire
// again.
EXPECT_TRUE(msg_queue.WaitForMessage(&reply));
EXPECT_EQ("\"wheel: 3\"", reply);
EXPECT_TRUE(msg_queue.WaitForMessage(&reply));
EXPECT_EQ("\"scroll: 3\"", reply);
}
private:
RenderWidgetHostViewAura* rwhv_root_;
};
// Subclass to disable wheel scroll latching in failing tests.
// https://crbug.com/800822
class SitePerProcessMouseWheelHitTestBrowserTestWheelScrollLatchingDisabled
: public SitePerProcessMouseWheelHitTestBrowserTest {
public:
SitePerProcessMouseWheelHitTestBrowserTestWheelScrollLatchingDisabled() {}
void SetUp() override {
feature_list_.InitWithFeatures({},
{features::kTouchpadAndWheelScrollLatching,
features::kAsyncWheelEvents});
SitePerProcessMouseWheelHitTestBrowserTest::SetUp();
}
private:
base::test::ScopedFeatureList feature_list_;
};
IN_PROC_BROWSER_TEST_P(
SitePerProcessMouseWheelHitTestBrowserTestWheelScrollLatchingDisabled,
MultipleSubframeWheelEventsOnMainThread) {
GURL main_url(embedded_test_server()->GetURL(
"/frame_tree/page_with_two_positioned_frames.html"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
FrameTreeNode* root = web_contents()->GetFrameTree()->root();
ASSERT_EQ(2U, root->child_count());
GURL frame_url(embedded_test_server()->GetURL(
"b.com", "/page_with_scrollable_div.html"));
// To test for https://bugs.chromium.org/p/chromium/issues/detail?id=820232
// it's important that both subframes are in the same renderer process, so
// we load the same URL in each case.
NavigateFrameToURL(root->child_at(0), frame_url);
NavigateFrameToURL(root->child_at(1), frame_url);
for (int frame_index = 0; frame_index < 2; frame_index++) {
// Synchronize with the child and parent renderers to guarantee that the
// surface information required for event hit testing is ready.
RenderWidgetHostViewBase* child_rwhv =
static_cast<RenderWidgetHostViewBase*>(
root->child_at(frame_index)->current_frame_host()->GetView());
EXPECT_FALSE(child_rwhv->wheel_scroll_latching_enabled());
WaitForChildFrameSurfaceReady(
root->child_at(frame_index)->current_frame_host());
content::RenderFrameHostImpl* child =
root->child_at(frame_index)->current_frame_host();
SetupWheelAndScrollHandlers(child);
gfx::Rect bounds = child_rwhv->GetViewBounds();
gfx::Point pos(bounds.x() + 10, bounds.y() + 10);
RunTest(pos, child_rwhv);
}
}
// Verifies that the test in SubframeWheelEventsOnMainThread also makes sense
// for the same page loaded in the main frame.
IN_PROC_BROWSER_TEST_P(
SitePerProcessMouseWheelHitTestBrowserTestWheelScrollLatchingDisabled,
MainframeWheelEventsOnMainThread) {
GURL main_url(
embedded_test_server()->GetURL("/page_with_scrollable_div.html"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
FrameTreeNode* root = web_contents()->GetFrameTree()->root();
content::RenderFrameHostImpl* rfhi = root->current_frame_host();
SetupWheelAndScrollHandlers(rfhi);
EXPECT_FALSE(
rfhi->GetRenderWidgetHost()->GetView()->wheel_scroll_latching_enabled());
gfx::Point pos(10, 10);
RunTest(pos, rfhi->GetRenderWidgetHost()->GetView());
}
IN_PROC_BROWSER_TEST_P(SitePerProcessMouseWheelHitTestBrowserTest,
InputEventRouterWheelTargetTest) {
GURL main_url(embedded_test_server()->GetURL(
"/frame_tree/page_with_positioned_nested_frames.html"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
auto* rwhv_root = static_cast<RenderWidgetHostViewAura*>(
web_contents()->GetRenderWidgetHostView());
set_rwhv_root(rwhv_root);
FrameTreeNode* root = web_contents()->GetFrameTree()->root();
ASSERT_EQ(1U, root->child_count());
GURL frame_url(embedded_test_server()->GetURL(
"b.com", "/page_with_scrollable_div.html"));
NavigateFrameToURL(root->child_at(0), frame_url);
// Synchronize with the child and parent renderers to guarantee that the
// surface information required for event hit testing is ready.
RenderWidgetHostViewBase* child_rwhv = static_cast<RenderWidgetHostViewBase*>(
root->child_at(0)->current_frame_host()->GetView());
WaitForChildFrameSurfaceReady(root->child_at(0)->current_frame_host());
RenderWidgetHostInputEventRouter* router =
web_contents()->GetInputEventRouter();
// Send a mouse wheel event to child.
gfx::Rect bounds = child_rwhv->GetViewBounds();
gfx::Point pos(bounds.x() + 10, bounds.y() + 10);
InputEventAckWaiter waiter(child_rwhv->GetRenderWidgetHost(),
blink::WebInputEvent::kMouseWheel);
SendMouseWheel(pos);
waiter.Wait();
if (child_rwhv->wheel_scroll_latching_enabled())
EXPECT_EQ(child_rwhv, router->wheel_target_.target);
else
EXPECT_EQ(nullptr, router->wheel_target_.target);
// Send a mouse wheel event to the main frame. If wheel scroll latching is
// enabled, it will still be routed to the child until the end of the current
// scrolling sequence. Since wheel scroll latching is enabled by default, we
// always do sync targeting, so InputEventAckWaiter is not needed here.
TestInputEventObserver child_frame_monitor(child_rwhv->GetRenderWidgetHost());
SendMouseWheel(pos);
if (child_rwhv->wheel_scroll_latching_enabled())
EXPECT_EQ(child_rwhv, router->wheel_target_.target);
else
EXPECT_EQ(nullptr, router->wheel_target_.target);
// Verify that a mouse wheel event was sent to the child frame renderer.
EXPECT_TRUE(child_frame_monitor.EventWasReceived());
EXPECT_EQ(child_frame_monitor.EventType(), blink::WebInputEvent::kMouseWheel);
// Kill the wheel target view process. This must reset the wheel_target_.
RenderProcessHost* child_process =
root->child_at(0)->current_frame_host()->GetProcess();
RenderProcessHostWatcher crash_observer(
child_process, RenderProcessHostWatcher::WATCH_FOR_PROCESS_EXIT);
child_process->Shutdown(0);
crash_observer.Wait();
EXPECT_EQ(nullptr, router->wheel_target_.target);
}
// Ensure that a cross-process subframe with a touch-handler can receive touch
// events.
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
SubframeTouchEventRouting) {
GURL main_url(embedded_test_server()->GetURL(
"/frame_tree/page_with_positioned_nested_frames.html"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
WebContentsImpl* contents = web_contents();
FrameTreeNode* root = contents->GetFrameTree()->root();
ASSERT_EQ(1U, root->child_count());
GURL frame_url(
embedded_test_server()->GetURL("b.com", "/page_with_touch_handler.html"));
NavigateFrameToURL(root->child_at(0), frame_url);
// Synchronize with the child and parent renderers to guarantee that the
// surface information required for event hit testing is ready.
WaitForChildFrameSurfaceReady(root->child_at(0)->current_frame_host());
// There's no intrinsic reason the following values can't be equal, but they
// aren't at present, and if they become the same this test will need to be
// updated to accommodate.
EXPECT_NE(cc::kTouchActionAuto, cc::kTouchActionNone);
// Verify the child's input router is initially set for kTouchActionAuto. The
// TouchStart event will trigger kTouchActionNone being sent back to the
// browser.
RenderWidgetHostImpl* child_render_widget_host =
root->child_at(0)->current_frame_host()->GetRenderWidgetHost();
EXPECT_EQ(cc::kTouchActionAuto,
child_render_widget_host->input_router()->AllowedTouchAction());
InputEventAckWaiter waiter(child_render_widget_host,
blink::WebInputEvent::kTouchStart);
// Simulate touch event to sub-frame.
gfx::Point child_center(150, 150);
auto* rwhv = static_cast<RenderWidgetHostViewAura*>(
contents->GetRenderWidgetHostView());
// Wait until renderer's compositor thread is synced.
{
MainThreadFrameObserver observer(child_render_widget_host);
observer.Wait();
}
ui::TouchEvent touch_event(
ui::ET_TOUCH_PRESSED, child_center, ui::EventTimeForNow(),
ui::PointerDetails(ui::EventPointerType::POINTER_TYPE_TOUCH,
/* pointer_id*/ 0,
/* radius_x */ 30.0f,
/* radius_y */ 30.0f,
/* force */ 0.0f));
UpdateEventRootLocation(&touch_event, rwhv);
rwhv->OnTouchEvent(&touch_event);
waiter.Wait();
{
MainThreadFrameObserver observer(child_render_widget_host);
observer.Wait();
}
// Verify touch handler in subframe was invoked.
std::string result;
EXPECT_TRUE(ExecuteScriptAndExtractString(
root->child_at(0),
"window.domAutomationController.send(getLastTouchEvent());", &result));
EXPECT_EQ("touchstart", result);
// Verify the presence of the touch handler in the child frame correctly
// propagates touch-action:none information back to the child's input router.
EXPECT_EQ(cc::kTouchActionNone,
child_render_widget_host->input_router()->AllowedTouchAction());
}
// This test verifies that the test in
// SitePerProcessHitTestBrowserTest.SubframeTouchEventRouting also works
// properly for the main frame. Prior to the CL in which this test is
// introduced, use of MainThreadFrameObserver in SubframeTouchEventRouting was
// not necessary since the touch events were handled on the main thread. Now
// they are handled on the compositor thread, hence the need to synchronize.
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
MainframeTouchEventRouting) {
GURL main_url(
embedded_test_server()->GetURL("/page_with_touch_handler.html"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
WebContentsImpl* contents = web_contents();
FrameTreeNode* root = contents->GetFrameTree()->root();
// Synchronize with the renderers to guarantee that the
// surface information required for event hit testing is ready.
auto* rwhv = static_cast<RenderWidgetHostViewAura*>(
contents->GetRenderWidgetHostView());
// There's no intrinsic reason the following values can't be equal, but they
// aren't at present, and if they become the same this test will need to be
// updated to accommodate.
EXPECT_NE(cc::kTouchActionAuto, cc::kTouchActionNone);
// Verify the main frame's input router is initially set for kTouchActionAuto.
// The TouchStart event will trigger kTouchActionNone being sent back to the
// browser.
RenderWidgetHostImpl* render_widget_host =
root->current_frame_host()->GetRenderWidgetHost();
EXPECT_EQ(cc::kTouchActionAuto,
render_widget_host->input_router()->AllowedTouchAction());
// Simulate touch event to the main frame.
gfx::Point frame_center(150, 150);
// Wait until renderer's compositor thread is synced.
{
auto observer =
std::make_unique<MainThreadFrameObserver>(render_widget_host);
observer->Wait();
}
ui::TouchEvent touch_event(
ui::ET_TOUCH_PRESSED, frame_center, ui::EventTimeForNow(),
ui::PointerDetails(ui::EventPointerType::POINTER_TYPE_TOUCH,
/* pointer_id*/ 0,
/* radius_x */ 30.0f,
/* radius_y */ 30.0f,
/* force */ 0.0f));
UpdateEventRootLocation(&touch_event, rwhv);
rwhv->OnTouchEvent(&touch_event);
{
auto observer =
std::make_unique<MainThreadFrameObserver>(render_widget_host);
observer->Wait();
}
// Verify touch handler in the main frame was invoked.
std::string result;
EXPECT_TRUE(ExecuteScriptAndExtractString(
root, "window.domAutomationController.send(getLastTouchEvent());",
&result));
EXPECT_EQ("touchstart", result);
// Verify the presence of the touch handler in the main frame correctly
// propagates touch-action:none information back to the main frame's input
// router.
EXPECT_EQ(cc::kTouchActionNone,
render_widget_host->input_router()->AllowedTouchAction());
}
namespace {
// Declared here to be close to the SubframeGestureEventRouting test.
void OnSyntheticGestureCompleted(scoped_refptr<MessageLoopRunner> runner,
SyntheticGesture::Result result) {
EXPECT_EQ(SyntheticGesture::GESTURE_FINISHED, result);
runner->Quit();
}
} // anonymous namespace
// https://crbug.com/592320
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
DISABLED_SubframeGestureEventRouting) {
GURL main_url(embedded_test_server()->GetURL(
"/frame_tree/page_with_positioned_nested_frames.html"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
FrameTreeNode* root = web_contents()->GetFrameTree()->root();
ASSERT_EQ(1U, root->child_count());
GURL frame_url(
embedded_test_server()->GetURL("b.com", "/page_with_click_handler.html"));
NavigateFrameToURL(root->child_at(0), frame_url);
auto* child_frame_host = root->child_at(0)->current_frame_host();
// Synchronize with the child and parent renderers to guarantee that the
// surface information required for event hit testing is ready.
WaitForChildFrameSurfaceReady(child_frame_host);
// There have been no GestureTaps sent yet.
{
std::string result;
EXPECT_TRUE(ExecuteScriptAndExtractString(
child_frame_host,
"window.domAutomationController.send(getClickStatus());", &result));
EXPECT_EQ("0 clicks received", result);
}
// Simulate touch sequence to send GestureTap to sub-frame.
SyntheticTapGestureParams params;
params.gesture_source_type = SyntheticGestureParams::TOUCH_INPUT;
gfx::Point center(150, 150);
params.position = gfx::PointF(center.x(), center.y());
params.duration_ms = 100;
std::unique_ptr<SyntheticTapGesture> gesture(new SyntheticTapGesture(params));
scoped_refptr<MessageLoopRunner> runner = new MessageLoopRunner();
RenderWidgetHostImpl* render_widget_host =
root->current_frame_host()->GetRenderWidgetHost();
// TODO(wjmaclean): Convert the call to base::BindOnce() to a lambda someday.
render_widget_host->QueueSyntheticGesture(
std::move(gesture), base::BindOnce(OnSyntheticGestureCompleted, runner));
// We need to run the message loop while we wait for the synthetic gesture
// to be processed; the callback registered above will get us out of the
// message loop when that happens.
runner->Run();
runner = nullptr;
// Verify click handler in subframe was invoked.
{
std::string result;
EXPECT_TRUE(ExecuteScriptAndExtractString(
child_frame_host,
"window.domAutomationController.send(getClickStatus());", &result));
EXPECT_EQ("1 click received", result);
}
}
namespace {
// Defined here to be close to
// SitePerProcessHitTestBrowserTest.InputEventRouterGestureTargetQueueTest.
// Will wait for RenderWidgetHost's compositor thread to sync if one is given.
// Returns the unique event id of the TouchStart.
uint32_t SendTouchTapWithExpectedTarget(
RenderWidgetHostViewBase* root_view,
const gfx::Point& touch_point,
RenderWidgetHostViewBase*& router_touch_target,
const RenderWidgetHostViewBase* expected_target,
RenderWidgetHostImpl* child_render_widget_host) {
auto* root_view_aura = static_cast<RenderWidgetHostViewAura*>(root_view);
if (child_render_widget_host != nullptr) {
MainThreadFrameObserver observer(child_render_widget_host);
observer.Wait();
}
ui::TouchEvent touch_event_pressed(
ui::ET_TOUCH_PRESSED, touch_point, ui::EventTimeForNow(),
ui::PointerDetails(ui::EventPointerType::POINTER_TYPE_TOUCH,
/* pointer_id*/ 0,
/* radius_x */ 30.0f,
/* radius_y */ 30.0f,
/* force */ 0.0f));
UpdateEventRootLocation(&touch_event_pressed, root_view_aura);
InputEventAckWaiter waiter(expected_target->GetRenderWidgetHost(),
blink::WebInputEvent::kTouchStart);
root_view_aura->OnTouchEvent(&touch_event_pressed);
if (child_render_widget_host != nullptr) {
MainThreadFrameObserver observer(child_render_widget_host);
observer.Wait();
}
waiter.Wait();
EXPECT_EQ(expected_target, router_touch_target);
ui::TouchEvent touch_event_released(
ui::ET_TOUCH_RELEASED, touch_point, ui::EventTimeForNow(),
ui::PointerDetails(ui::EventPointerType::POINTER_TYPE_TOUCH,
/* pointer_id*/ 0,
/* radius_x */ 30.0f,
/* radius_y */ 30.0f,
/* force */ 0.0f));
UpdateEventRootLocation(&touch_event_released, root_view_aura);
root_view_aura->OnTouchEvent(&touch_event_released);
if (child_render_widget_host != nullptr) {
MainThreadFrameObserver observer(child_render_widget_host);
observer.Wait();
}
EXPECT_EQ(nullptr, router_touch_target);
return touch_event_pressed.unique_event_id();
}
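// Sends a full touchscreen gesture tap sequence (GestureBegin, GestureTapDown,
// GestureShowPress, GestureTap, GestureEnd) through the root view and checks
// the router's touchscreen gesture target after each event.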
void SendGestureTapSequenceWithExpectedTarget(
RenderWidgetHostViewBase* root_view,
const gfx::Point& gesture_point,
RenderWidgetHostViewBase*& router_gesture_target,
const RenderWidgetHostViewBase* old_expected_target,
const RenderWidgetHostViewBase* expected_target,
const uint32_t unique_touch_event_id) {
auto* root_view_aura = static_cast<RenderWidgetHostViewAura*>(root_view);
ui::GestureEventDetails gesture_begin_details(ui::ET_GESTURE_BEGIN);
gesture_begin_details.set_device_type(
ui::GestureDeviceType::DEVICE_TOUCHSCREEN);
ui::GestureEvent gesture_begin_event(
gesture_point.x(), gesture_point.y(), 0, ui::EventTimeForNow(),
gesture_begin_details, unique_touch_event_id);
UpdateEventRootLocation(&gesture_begin_event, root_view_aura);
root_view_aura->OnGestureEvent(&gesture_begin_event);
// We expect to still have the old gesture target in place for the
// GestureFlingCancel that will be inserted before GestureTapDown.
// Note: the GestureFlingCancel is inserted by RenderWidgetHostViewAura::
// OnGestureEvent() when it sees ui::ET_GESTURE_TAP_DOWN, so we don't
// explicitly add it here.
EXPECT_EQ(old_expected_target, router_gesture_target);
ui::GestureEventDetails gesture_tap_down_details(ui::ET_GESTURE_TAP_DOWN);
gesture_tap_down_details.set_device_type(
ui::GestureDeviceType::DEVICE_TOUCHSCREEN);
ui::GestureEvent gesture_tap_down_event(
gesture_point.x(), gesture_point.y(), 0, ui::EventTimeForNow(),
gesture_tap_down_details, unique_touch_event_id);
UpdateEventRootLocation(&gesture_tap_down_event, root_view_aura);
root_view_aura->OnGestureEvent(&gesture_tap_down_event);
EXPECT_EQ(expected_target, router_gesture_target);
ui::GestureEventDetails gesture_show_press_details(ui::ET_GESTURE_SHOW_PRESS);
gesture_show_press_details.set_device_type(
ui::GestureDeviceType::DEVICE_TOUCHSCREEN);
ui::GestureEvent gesture_show_press_event(
gesture_point.x(), gesture_point.y(), 0, ui::EventTimeForNow(),
gesture_show_press_details, unique_touch_event_id);
UpdateEventRootLocation(&gesture_show_press_event, root_view_aura);
root_view_aura->OnGestureEvent(&gesture_show_press_event);
EXPECT_EQ(expected_target, router_gesture_target);
ui::GestureEventDetails gesture_tap_details(ui::ET_GESTURE_TAP);
gesture_tap_details.set_device_type(
ui::GestureDeviceType::DEVICE_TOUCHSCREEN);
gesture_tap_details.set_tap_count(1);
ui::GestureEvent gesture_tap_event(gesture_point.x(), gesture_point.y(), 0,
ui::EventTimeForNow(), gesture_tap_details,
unique_touch_event_id);
UpdateEventRootLocation(&gesture_tap_event, root_view_aura);
root_view_aura->OnGestureEvent(&gesture_tap_event);
EXPECT_EQ(expected_target, router_gesture_target);
ui::GestureEventDetails gesture_end_details(ui::ET_GESTURE_END);
gesture_end_details.set_device_type(
ui::GestureDeviceType::DEVICE_TOUCHSCREEN);
ui::GestureEvent gesture_end_event(gesture_point.x(), gesture_point.y(), 0,
ui::EventTimeForNow(), gesture_end_details,
unique_touch_event_id);
UpdateEventRootLocation(&gesture_end_event, root_view_aura);
root_view_aura->OnGestureEvent(&gesture_end_event);
EXPECT_EQ(expected_target, router_gesture_target);
}
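// Sends a touchpad pinch begin/update/end sequence through the root view and
// verifies that |expected_target| receives each event and remains the router's
// touchpad gesture target.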
void SendTouchpadPinchSequenceWithExpectedTarget(
RenderWidgetHostViewBase* root_view,
const gfx::Point& gesture_point,
RenderWidgetHostViewBase*& router_touchpad_gesture_target,
RenderWidgetHostViewBase* expected_target) {
auto* root_view_aura = static_cast<RenderWidgetHostViewAura*>(root_view);
ui::GestureEventDetails pinch_begin_details(ui::ET_GESTURE_PINCH_BEGIN);
pinch_begin_details.set_device_type(ui::GestureDeviceType::DEVICE_TOUCHPAD);
ui::GestureEvent pinch_begin(gesture_point.x(), gesture_point.y(), 0,
ui::EventTimeForNow(), pinch_begin_details);
UpdateEventRootLocation(&pinch_begin, root_view_aura);
TestInputEventObserver target_monitor(expected_target->GetRenderWidgetHost());
InputEventAckWaiter waiter(expected_target->GetRenderWidgetHost(),
blink::WebInputEvent::kGesturePinchBegin);
root_view_aura->OnGestureEvent(&pinch_begin);
// If the expected target is not the root, then we should be doing async
// targeting first. So event dispatch should not happen synchronously.
// Validate that the expected target does not receive the event immediately in
// such cases.
if (root_view != expected_target)
EXPECT_FALSE(target_monitor.EventWasReceived());
waiter.Wait();
EXPECT_TRUE(target_monitor.EventWasReceived());
EXPECT_EQ(expected_target, router_touchpad_gesture_target);
target_monitor.ResetEventsReceived();
ui::GestureEventDetails pinch_update_details(ui::ET_GESTURE_PINCH_UPDATE);
pinch_update_details.set_device_type(ui::GestureDeviceType::DEVICE_TOUCHPAD);
ui::GestureEvent pinch_update(gesture_point.x(), gesture_point.y(), 0,
ui::EventTimeForNow(), pinch_update_details);
UpdateEventRootLocation(&pinch_update, root_view_aura);
root_view_aura->OnGestureEvent(&pinch_update);
EXPECT_EQ(expected_target, router_touchpad_gesture_target);
EXPECT_TRUE(target_monitor.EventWasReceived());
EXPECT_EQ(target_monitor.EventType(),
blink::WebInputEvent::kGesturePinchUpdate);
target_monitor.ResetEventsReceived();
ui::GestureEventDetails pinch_end_details(ui::ET_GESTURE_PINCH_END);
pinch_end_details.set_device_type(ui::GestureDeviceType::DEVICE_TOUCHPAD);
ui::GestureEvent pinch_end(gesture_point.x(), gesture_point.y(), 0,
ui::EventTimeForNow(), pinch_end_details);
UpdateEventRootLocation(&pinch_end, root_view_aura);
root_view_aura->OnGestureEvent(&pinch_end);
EXPECT_EQ(expected_target, router_touchpad_gesture_target);
EXPECT_TRUE(target_monitor.EventWasReceived());
EXPECT_EQ(target_monitor.EventType(), blink::WebInputEvent::kGesturePinchEnd);
}
#if !defined(OS_WIN)
// Sending touchpad fling events is not supported on Windows.
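// Sends a touchpad fling start/cancel pair through the root view (wrapped in a
// gesture scroll sequence when wheel scroll latching is enabled) and verifies
// the router's touchpad gesture target.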
void SendTouchpadFlingSequenceWithExpectedTarget(
RenderWidgetHostViewBase* root_view,
const gfx::Point& gesture_point,
RenderWidgetHostViewBase*& router_touchpad_gesture_target,
RenderWidgetHostViewBase* expected_target) {
auto* root_view_aura = static_cast<RenderWidgetHostViewAura*>(root_view);
if (root_view_aura->wheel_scroll_latching_enabled()) {
// Touchpad fling must be sent inside a gesture scroll sequence.
blink::WebGestureEvent gesture_event(
blink::WebGestureEvent::kGestureScrollBegin,
blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests(),
blink::kWebGestureDeviceTouchpad);
gesture_event.SetPositionInWidget(gfx::PointF(gesture_point));
gesture_event.data.scroll_begin.delta_x_hint = 0.0f;
gesture_event.data.scroll_begin.delta_y_hint = 1.0f;
expected_target->GetRenderWidgetHost()->ForwardGestureEvent(gesture_event);
}
ui::ScrollEvent fling_start(ui::ET_SCROLL_FLING_START, gesture_point,
ui::EventTimeForNow(), 0, 1, 0, 1, 0, 1);
UpdateEventRootLocation(&fling_start, root_view_aura);
TestInputEventObserver target_monitor(expected_target->GetRenderWidgetHost());
InputEventAckWaiter waiter(expected_target->GetRenderWidgetHost(),
blink::WebInputEvent::kGestureFlingStart);
root_view_aura->OnScrollEvent(&fling_start);
// If the expected target is not the root, then we should be doing async
// targeting first. So event dispatch should not happen synchronously.
// Validate that the expected target does not receive the event immediately in
// such cases.
if (root_view != expected_target)
EXPECT_FALSE(target_monitor.EventWasReceived());
waiter.Wait();
EXPECT_TRUE(target_monitor.EventWasReceived());
EXPECT_EQ(expected_target, router_touchpad_gesture_target);
target_monitor.ResetEventsReceived();
ui::ScrollEvent fling_cancel(ui::ET_SCROLL_FLING_CANCEL, gesture_point,
ui::EventTimeForNow(), 0, 1, 0, 1, 0, 1);
UpdateEventRootLocation(&fling_cancel, root_view_aura);
root_view_aura->OnScrollEvent(&fling_cancel);
EXPECT_EQ(expected_target, router_touchpad_gesture_target);
EXPECT_TRUE(target_monitor.EventWasReceived());
EXPECT_EQ(target_monitor.EventType(),
blink::WebInputEvent::kGestureFlingCancel);
if (root_view_aura->wheel_scroll_latching_enabled()) {
blink::WebGestureEvent gesture_event(
blink::WebGestureEvent::kGestureScrollEnd,
blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests(),
blink::kWebGestureDeviceTouchpad);
gesture_event.SetPositionInWidget(gfx::PointF(gesture_point));
expected_target->GetRenderWidgetHost()->ForwardGestureEvent(gesture_event);
}
}
#endif // !defined(OS_WIN)
} // anonymous namespace
// Flaky, see https://crbug.com/823578
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
DISABLED_InputEventRouterGestureTargetMapTest) {
GURL main_url(embedded_test_server()->GetURL(
"/frame_tree/page_with_positioned_nested_frames.html"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
WebContentsImpl* contents = web_contents();
FrameTreeNode* root = contents->GetFrameTree()->root();
ASSERT_EQ(1U, root->child_count());
GURL frame_url(
embedded_test_server()->GetURL("b.com", "/page_with_click_handler.html"));
NavigateFrameToURL(root->child_at(0), frame_url);
auto* child_frame_host = root->child_at(0)->current_frame_host();
auto* rwhv_child =
static_cast<RenderWidgetHostViewBase*>(child_frame_host->GetView());
// Synchronize with the child and parent renderers to guarantee that the
// surface information required for event hit testing is ready.
WaitForChildFrameSurfaceReady(child_frame_host);
// All touches & gestures are sent to the main frame's view, and should be
// routed appropriately from there.
auto* rwhv_parent = static_cast<RenderWidgetHostViewBase*>(
contents->GetRenderWidgetHostView());
RenderWidgetHostInputEventRouter* router = contents->GetInputEventRouter();
EXPECT_TRUE(router->touchscreen_gesture_target_map_.empty());
EXPECT_EQ(nullptr, router->touchscreen_gesture_target_.target);
// Send touch sequence to main-frame.
gfx::Point main_frame_point(25, 25);
uint32_t firstId = SendTouchTapWithExpectedTarget(
rwhv_parent, main_frame_point, router->touch_target_.target, rwhv_parent,
nullptr);
EXPECT_EQ(1u, router->touchscreen_gesture_target_map_.size());
EXPECT_EQ(nullptr, router->touchscreen_gesture_target_.target);
// Send touch sequence to child.
gfx::Point child_center(150, 150);
uint32_t secondId = SendTouchTapWithExpectedTarget(
rwhv_parent, child_center, router->touch_target_.target, rwhv_child,
nullptr);
EXPECT_EQ(2u, router->touchscreen_gesture_target_map_.size());
EXPECT_EQ(nullptr, router->touchscreen_gesture_target_.target);
// Send another touch sequence to main frame.
uint32_t thirdId = SendTouchTapWithExpectedTarget(
rwhv_parent, main_frame_point, router->touch_target_.target, rwhv_parent,
nullptr);
EXPECT_EQ(3u, router->touchscreen_gesture_target_map_.size());
EXPECT_EQ(nullptr, router->touchscreen_gesture_target_.target);
// Send Gestures to clear GestureTargetQueue.
// The first touch sequence should generate a GestureTapDown, sent to the
// main frame.
SendGestureTapSequenceWithExpectedTarget(
rwhv_parent, main_frame_point, router->touchscreen_gesture_target_.target,
nullptr, rwhv_parent, firstId);
EXPECT_EQ(2u, router->touchscreen_gesture_target_map_.size());
// Note: rwhv_parent is the target used for GestureFlingCancel sent by
// RenderWidgetHostViewAura::OnGestureEvent() at the start of the next gesture
// sequence; the sequence itself goes to rwhv_child.
EXPECT_EQ(rwhv_parent, router->touchscreen_gesture_target_.target);
// The second touch sequence should generate a GestureTapDown, sent to the
// child frame.
SendGestureTapSequenceWithExpectedTarget(
rwhv_parent, child_center, router->touchscreen_gesture_target_.target,
rwhv_parent, rwhv_child, secondId);
EXPECT_EQ(1u, router->touchscreen_gesture_target_map_.size());
EXPECT_EQ(rwhv_child, router->touchscreen_gesture_target_.target);
// The third touch sequence should generate a GestureTapDown, sent to the
// main frame.
SendGestureTapSequenceWithExpectedTarget(
rwhv_parent, main_frame_point, router->touchscreen_gesture_target_.target,
rwhv_child, rwhv_parent, thirdId);
EXPECT_EQ(0u, router->touchscreen_gesture_target_map_.size());
EXPECT_EQ(rwhv_parent, router->touchscreen_gesture_target_.target);
}
// TODO: Flaky test; see crbug.com/802827.
#if defined(OS_WIN)
#define MAYBE_InputEventRouterGesturePreventDefaultTargetMapTest \
DISABLED_InputEventRouterGesturePreventDefaultTargetMapTest
#else
#define MAYBE_InputEventRouterGesturePreventDefaultTargetMapTest \
InputEventRouterGesturePreventDefaultTargetMapTest
#endif
#if defined(USE_AURA) || defined(OS_ANDROID)
IN_PROC_BROWSER_TEST_P(
SitePerProcessHitTestBrowserTest,
MAYBE_InputEventRouterGesturePreventDefaultTargetMapTest) {
GURL main_url(embedded_test_server()->GetURL(
"/frame_tree/page_with_positioned_nested_frames.html"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
WebContentsImpl* contents = web_contents();
FrameTreeNode* root = contents->GetFrameTree()->root();
ASSERT_EQ(1U, root->child_count());
GURL frame_url(embedded_test_server()->GetURL(
"b.com", "/page_with_touch_start_default_prevented.html"));
NavigateFrameToURL(root->child_at(0), frame_url);
auto* child_frame_host = root->child_at(0)->current_frame_host();
RenderWidgetHostImpl* child_render_widget_host =
child_frame_host->GetRenderWidgetHost();
auto* rwhv_child =
static_cast<RenderWidgetHostViewBase*>(child_frame_host->GetView());
// Synchronize with the child and parent renderers to guarantee that the
// surface information required for event hit testing is ready.
WaitForChildFrameSurfaceReady(child_frame_host);
// All touches & gestures are sent to the main frame's view, and should be
// routed appropriately from there.
auto* rwhv_parent = static_cast<RenderWidgetHostViewBase*>(
contents->GetRenderWidgetHostView());
RenderWidgetHostInputEventRouter* router = contents->GetInputEventRouter();
EXPECT_TRUE(router->touchscreen_gesture_target_map_.empty());
EXPECT_EQ(nullptr, router->touchscreen_gesture_target_.target);
// Send touch sequence to main-frame.
gfx::Point main_frame_point(25, 25);
uint32_t firstId = SendTouchTapWithExpectedTarget(
rwhv_parent, main_frame_point, router->touch_target_.target, rwhv_parent,
child_render_widget_host);
EXPECT_EQ(1u, router->touchscreen_gesture_target_map_.size());
EXPECT_EQ(nullptr, router->touchscreen_gesture_target_.target);
// Send touch sequence to child.
gfx::Point child_center(150, 150);
SendTouchTapWithExpectedTarget(rwhv_parent, child_center,
router->touch_target_.target, rwhv_child,
child_render_widget_host);
EXPECT_EQ(1u, router->touchscreen_gesture_target_map_.size());
EXPECT_EQ(nullptr, router->touchscreen_gesture_target_.target);
// Send another touch sequence to main frame.
uint32_t thirdId = SendTouchTapWithExpectedTarget(
rwhv_parent, main_frame_point, router->touch_target_.target, rwhv_parent,
child_render_widget_host);
EXPECT_EQ(2u, router->touchscreen_gesture_target_map_.size());
EXPECT_EQ(nullptr, router->touchscreen_gesture_target_.target);
// Send Gestures to clear GestureTargetQueue.
// The first touch sequence should generate a GestureTapDown, sent to the
// main frame.
SendGestureTapSequenceWithExpectedTarget(
rwhv_parent, main_frame_point, router->touchscreen_gesture_target_.target,
nullptr, rwhv_parent, firstId);
EXPECT_EQ(1u, router->touchscreen_gesture_target_map_.size());
// Note: rwhv_parent is the target used for GestureFlingCancel sent by
// RenderWidgetHostViewAura::OnGestureEvent() at the start of the next gesture
// sequence; the sequence itself goes to rwhv_child.
EXPECT_EQ(rwhv_parent, router->touchscreen_gesture_target_.target);
// The third touch sequence should generate a GestureTapDown, sent to the
// main frame.
SendGestureTapSequenceWithExpectedTarget(
rwhv_parent, main_frame_point, router->touchscreen_gesture_target_.target,
rwhv_parent, rwhv_parent, thirdId);
EXPECT_EQ(0u, router->touchscreen_gesture_target_map_.size());
EXPECT_EQ(rwhv_parent, router->touchscreen_gesture_target_.target);
}
#endif // defined(USE_AURA) || defined(OS_ANDROID)
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
InputEventRouterTouchpadGestureTargetTest) {
GURL main_url(embedded_test_server()->GetURL(
"/frame_tree/page_with_positioned_nested_frames.html"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
WebContentsImpl* contents = web_contents();
FrameTreeNode* root = contents->GetFrameTree()->root();
ASSERT_EQ(1U, root->child_count());
GURL frame_url(
embedded_test_server()->GetURL("b.com", "/page_with_click_handler.html"));
NavigateFrameToURL(root->child_at(0), frame_url);
auto* child_frame_host = root->child_at(0)->current_frame_host();
// Synchronize with the child and parent renderers to guarantee that the
// surface information required for event hit testing is ready.
auto* rwhv_child =
static_cast<RenderWidgetHostViewBase*>(child_frame_host->GetView());
WaitForChildFrameSurfaceReady(child_frame_host);
// All touches & gestures are sent to the main frame's view, and should be
// routed appropriately from there.
auto* rwhv_parent = static_cast<RenderWidgetHostViewBase*>(
contents->GetRenderWidgetHostView());
RenderWidgetHostInputEventRouter* router = contents->GetInputEventRouter();
EXPECT_EQ(nullptr, router->touchpad_gesture_target_.target);
gfx::Point main_frame_point(25, 25);
gfx::Point child_center(150, 150);
// Send touchpad pinch sequence to main-frame.
SendTouchpadPinchSequenceWithExpectedTarget(
rwhv_parent, main_frame_point, router->touchpad_gesture_target_.target,
rwhv_parent);
// Send touchpad pinch sequence to child.
SendTouchpadPinchSequenceWithExpectedTarget(
rwhv_parent, child_center, router->touchpad_gesture_target_.target,
rwhv_child);
// Send another touchpad pinch sequence to main frame.
SendTouchpadPinchSequenceWithExpectedTarget(
rwhv_parent, main_frame_point, router->touchpad_gesture_target_.target,
rwhv_parent);
#if !defined(OS_WIN)
// Sending touchpad fling events is not supported on Windows.
// Send touchpad fling sequence to main-frame.
SendTouchpadFlingSequenceWithExpectedTarget(
rwhv_parent, main_frame_point, router->touchpad_gesture_target_.target,
rwhv_parent);
// Send touchpad fling sequence to child.
SendTouchpadFlingSequenceWithExpectedTarget(
rwhv_parent, child_center, router->touchpad_gesture_target_.target,
rwhv_child);
// Send another touchpad fling sequence to main frame.
SendTouchpadFlingSequenceWithExpectedTarget(
rwhv_parent, main_frame_point, router->touchpad_gesture_target_.target,
rwhv_parent);
#endif
}
#endif // defined(USE_AURA)
// A WebContentsDelegate to capture ContextMenu creation events.
class ContextMenuObserverDelegate : public WebContentsDelegate {
public:
ContextMenuObserverDelegate()
: context_menu_created_(false),
message_loop_runner_(new MessageLoopRunner) {}
~ContextMenuObserverDelegate() override {}
bool HandleContextMenu(const content::ContextMenuParams& params) override {
context_menu_created_ = true;
menu_params_ = params;
message_loop_runner_->Quit();
return true;
}
ContextMenuParams getParams() { return menu_params_; }
void Wait() {
if (!context_menu_created_)
message_loop_runner_->Run();
context_menu_created_ = false;
}
private:
bool context_menu_created_;
ContextMenuParams menu_params_;
// The MessageLoopRunner used to spin the message loop.
scoped_refptr<MessageLoopRunner> message_loop_runner_;
DISALLOW_COPY_AND_ASSIGN(ContextMenuObserverDelegate);
};
// Helper function to run the CreateContextMenuTest in either normal
// or high DPI mode.
void CreateContextMenuTestHelper(
Shell* shell,
net::test_server::EmbeddedTestServer* embedded_test_server) {
GURL main_url(embedded_test_server->GetURL(
"/frame_tree/page_with_positioned_frame.html"));
EXPECT_TRUE(NavigateToURL(shell, main_url));
// It is safe to obtain the root frame tree node here, as it doesn't change.
FrameTreeNode* root = static_cast<WebContentsImpl*>(shell->web_contents())
->GetFrameTree()
->root();
ASSERT_EQ(1U, root->child_count());
FrameTreeNode* child_node = root->child_at(0);
GURL site_url(embedded_test_server->GetURL("baz.com", "/title1.html"));
EXPECT_EQ(site_url, child_node->current_url());
EXPECT_NE(shell->web_contents()->GetSiteInstance(),
child_node->current_frame_host()->GetSiteInstance());
RenderWidgetHostViewBase* root_view = static_cast<RenderWidgetHostViewBase*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
RenderWidgetHostViewBase* rwhv_child = static_cast<RenderWidgetHostViewBase*>(
child_node->current_frame_host()->GetRenderWidgetHost()->GetView());
// Ensure that the child process renderer is ready to have input events
// routed to it. This happens when the browser process has received
// updated compositor surfaces from both renderer processes.
WaitForChildFrameSurfaceReady(child_node->current_frame_host());
// A WebContentsDelegate to listen for the ShowContextMenu message.
ContextMenuObserverDelegate context_menu_delegate;
shell->web_contents()->SetDelegate(&context_menu_delegate);
RenderWidgetHostInputEventRouter* router =
static_cast<WebContentsImpl*>(shell->web_contents())
->GetInputEventRouter();
float scale_factor = GetPageScaleFactor(shell);
gfx::Rect root_bounds = root_view->GetViewBounds();
gfx::Rect bounds = rwhv_child->GetViewBounds();
gfx::Point point(
gfx::ToCeiledInt((bounds.x() - root_bounds.x() + 5) * scale_factor),
gfx::ToCeiledInt((bounds.y() - root_bounds.y() + 5) * scale_factor));
// Target right-click event to child frame.
blink::WebMouseEvent click_event(
blink::WebInputEvent::kMouseDown, blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests());
click_event.button = blink::WebPointerProperties::Button::kRight;
SetWebEventPositions(&click_event, point, root_view);
click_event.click_count = 1;
router->RouteMouseEvent(root_view, &click_event, ui::LatencyInfo());
// We also need a MouseUp event, needed by Windows.
click_event.SetType(blink::WebInputEvent::kMouseUp);
SetWebEventPositions(&click_event, point, root_view);
router->RouteMouseEvent(root_view, &click_event, ui::LatencyInfo());
context_menu_delegate.Wait();
ContextMenuParams params = context_menu_delegate.getParams();
EXPECT_NEAR(point.x(), params.x, 2);
EXPECT_NEAR(point.y(), params.y, 2);
}
// Test that a mouse right-click to an out-of-process iframe causes a context
// menu to be generated with the correct screen position.
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
CreateContextMenuTest) {
CreateContextMenuTestHelper(shell(), embedded_test_server());
}
// Test that a mouse right-click to an out-of-process iframe causes a context
// menu to be generated with the correct screen position on a screen with
// non-default scale factor.
#if defined(OS_ANDROID) || defined(OS_WIN)
// High DPI tests don't work properly on Android, which has a fixed scale factor.
// Windows is disabled because of https://crbug.com/545547.
#define MAYBE_HighDPICreateContextMenuTest DISABLED_HighDPICreateContextMenuTest
#else
#define MAYBE_HighDPICreateContextMenuTest HighDPICreateContextMenuTest
#endif
IN_PROC_BROWSER_TEST_P(SitePerProcessHighDPIHitTestBrowserTest,
MAYBE_HighDPICreateContextMenuTest) {
CreateContextMenuTestHelper(shell(), embedded_test_server());
}
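// A BrowserMessageFilter that intercepts the ShowWidget IPC (ShowPopup on Mac
// and Android) on the IO thread, records the popup's initial rect and routing
// id, and quits its MessageLoopRunner once those values arrive on the UI
// thread.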
class ShowWidgetMessageFilter : public content::BrowserMessageFilter {
public:
ShowWidgetMessageFilter()
#if defined(OS_MACOSX) || defined(OS_ANDROID)
: content::BrowserMessageFilter(FrameMsgStart),
#else
: content::BrowserMessageFilter(ViewMsgStart),
#endif
message_loop_runner_(new content::MessageLoopRunner) {
}
bool OnMessageReceived(const IPC::Message& message) override {
IPC_BEGIN_MESSAGE_MAP(ShowWidgetMessageFilter, message)
#if defined(OS_MACOSX) || defined(OS_ANDROID)
IPC_MESSAGE_HANDLER(FrameHostMsg_ShowPopup, OnShowPopup)
#else
IPC_MESSAGE_HANDLER(ViewHostMsg_ShowWidget, OnShowWidget)
#endif
IPC_END_MESSAGE_MAP()
return false;
}
gfx::Rect last_initial_rect() const { return initial_rect_; }
int last_routing_id() const { return routing_id_; }
void Wait() {
initial_rect_ = gfx::Rect();
routing_id_ = MSG_ROUTING_NONE;
message_loop_runner_->Run();
}
void Reset() {
initial_rect_ = gfx::Rect();
routing_id_ = MSG_ROUTING_NONE;
message_loop_runner_ = new content::MessageLoopRunner;
}
private:
~ShowWidgetMessageFilter() override {}
void OnShowWidget(int route_id, const gfx::Rect& initial_rect) {
content::BrowserThread::PostTask(
content::BrowserThread::UI, FROM_HERE,
base::BindOnce(&ShowWidgetMessageFilter::OnShowWidgetOnUI, this,
route_id, initial_rect));
}
#if defined(OS_MACOSX) || defined(OS_ANDROID)
void OnShowPopup(const FrameHostMsg_ShowPopup_Params& params) {
content::BrowserThread::PostTask(
content::BrowserThread::UI, FROM_HERE,
base::Bind(&ShowWidgetMessageFilter::OnShowWidgetOnUI, this,
MSG_ROUTING_NONE, params.bounds));
}
#endif
void OnShowWidgetOnUI(int route_id, const gfx::Rect& initial_rect) {
initial_rect_ = initial_rect;
routing_id_ = route_id;
message_loop_runner_->Quit();
}
scoped_refptr<content::MessageLoopRunner> message_loop_runner_;
gfx::Rect initial_rect_;
int routing_id_;
DISALLOW_COPY_AND_ASSIGN(ShowWidgetMessageFilter);
};
// Test that clicking a select element in an out-of-process iframe creates
// a popup menu in the correct position.
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest, PopupMenuTest) {
GURL main_url(
embedded_test_server()->GetURL("/cross_site_iframe_factory.html?a(a)"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
FrameTreeNode* root = web_contents()->GetFrameTree()->root();
RenderWidgetHostViewBase* rwhv_root = static_cast<RenderWidgetHostViewBase*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
FrameTreeNode* child_node = root->child_at(0);
GURL site_url(embedded_test_server()->GetURL(
"baz.com", "/site_isolation/page-with-select.html"));
NavigateFrameToURL(child_node, site_url);
web_contents()->SendScreenRects();
WaitForChildFrameSurfaceReady(child_node->current_frame_host());
RenderWidgetHostViewBase* rwhv_child = static_cast<RenderWidgetHostViewBase*>(
child_node->current_frame_host()->GetRenderWidgetHost()->GetView());
EXPECT_NE(shell()->web_contents()->GetSiteInstance(),
child_node->current_frame_host()->GetSiteInstance());
scoped_refptr<ShowWidgetMessageFilter> filter = new ShowWidgetMessageFilter();
child_node->current_frame_host()->GetProcess()->AddFilter(filter.get());
// Target left-click event to child frame.
blink::WebMouseEvent click_event(
blink::WebInputEvent::kMouseDown, blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests());
click_event.button = blink::WebPointerProperties::Button::kLeft;
SetWebEventPositions(&click_event, gfx::Point(15, 15), rwhv_root);
click_event.click_count = 1;
rwhv_child->ProcessMouseEvent(click_event, ui::LatencyInfo());
// Dismiss the popup.
SetWebEventPositions(&click_event, gfx::Point(1, 1), rwhv_root);
rwhv_child->ProcessMouseEvent(click_event, ui::LatencyInfo());
filter->Wait();
gfx::Rect popup_rect = filter->last_initial_rect();
if (IsUseZoomForDSFEnabled()) {
ScreenInfo screen_info;
shell()->web_contents()->GetRenderWidgetHostView()->GetScreenInfo(
&screen_info);
popup_rect = gfx::ScaleToRoundedRect(popup_rect,
1 / screen_info.device_scale_factor);
}
#if defined(OS_MACOSX) || defined(OS_ANDROID)
// On Mac and Android we receive the coordinates before they are transformed,
// so they are still relative to the out-of-process iframe origin.
EXPECT_EQ(popup_rect.x(), 9);
EXPECT_EQ(popup_rect.y(), 9);
#else
EXPECT_EQ(popup_rect.x() - rwhv_root->GetViewBounds().x(), 354);
EXPECT_EQ(popup_rect.y() - rwhv_root->GetViewBounds().y(), 94);
#endif
#if defined(OS_LINUX)
// Verify click-and-drag selection of popups still works on Linux with
// OOPIFs enabled. This is only necessary to test on Aura because Mac and
  // Android use native widgets. Windows does not support this UI convention
  // (it requires separate clicks to open the menu and select an
// option). See https://crbug.com/703191.
int process_id = child_node->current_frame_host()->GetProcess()->GetID();
filter->Reset();
RenderWidgetHostInputEventRouter* router =
static_cast<WebContentsImpl*>(shell()->web_contents())
->GetInputEventRouter();
// Re-open the select element.
SetWebEventPositions(&click_event, gfx::Point(360, 90), rwhv_root);
click_event.click_count = 1;
router->RouteMouseEvent(rwhv_root, &click_event, ui::LatencyInfo());
filter->Wait();
RenderWidgetHostViewAura* popup_view = static_cast<RenderWidgetHostViewAura*>(
RenderWidgetHost::FromID(process_id, filter->last_routing_id())
->GetView());
// The IO thread posts to ViewMsg_ShowWidget handlers in both the message
// filter above and the WebContents, which initializes the popup's view.
// It is possible for this code to execute before the WebContents handler,
// in which case OnMouseEvent would be called on an uninitialized RWHVA.
// This loop ensures that the initialization completes before proceeding.
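  // (Each iteration below simply yields to the message loop so that the
  // pending UI-thread initialization task gets a chance to run.)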
while (!popup_view->window()) {
base::RunLoop loop;
base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
loop.QuitClosure());
loop.Run();
}
RenderWidgetHostMouseEventMonitor popup_monitor(
popup_view->GetRenderWidgetHost());
// Next send a mouse up directly targeting the first option, simulating a
// drag. This requires a ui::MouseEvent because it tests behavior that is
// above RWH input event routing.
ui::MouseEvent mouse_up_event(ui::ET_MOUSE_RELEASED, gfx::Point(10, 5),
gfx::Point(10, 5), ui::EventTimeForNow(),
ui::EF_LEFT_MOUSE_BUTTON,
ui::EF_LEFT_MOUSE_BUTTON);
UpdateEventRootLocation(&mouse_up_event, rwhv_root);
popup_view->OnMouseEvent(&mouse_up_event);
// This verifies that the popup actually received the event, and it wasn't
// diverted to a different RenderWidgetHostView due to mouse capture.
EXPECT_TRUE(popup_monitor.EventWasReceived());
#endif // defined(OS_LINUX)
}
// Test that clicking a select element in a nested out-of-process iframe creates
// a popup menu in the correct position, even if the top-level page repositions
// its out-of-process iframe. This verifies that screen positioning information
// is propagating down the frame tree correctly.
#if defined(OS_ANDROID)
// Surface-based hit testing and coordinate translation are not yet available
// on Android.
#define MAYBE_NestedPopupMenuTest DISABLED_NestedPopupMenuTest
#else
// Times out frequently. https://crbug.com/599730.
#define MAYBE_NestedPopupMenuTest DISABLED_NestedPopupMenuTest
#endif
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest,
MAYBE_NestedPopupMenuTest) {
GURL main_url(embedded_test_server()->GetURL(
"/cross_site_iframe_factory.html?a(b(c))"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
FrameTreeNode* root = web_contents()->GetFrameTree()->root();
RenderWidgetHostViewBase* rwhv_root = static_cast<RenderWidgetHostViewBase*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
web_contents()->SendScreenRects();
// For clarity, we are labeling the frame tree nodes as:
  //  - root_node
  //    \-> b_node (out-of-process from root and c_node)
  //          \-> c_node (out-of-process from root and b_node)
content::TestNavigationObserver navigation_observer(shell()->web_contents());
FrameTreeNode* b_node = root->child_at(0);
FrameTreeNode* c_node = b_node->child_at(0);
GURL site_url(embedded_test_server()->GetURL(
"baz.com", "/site_isolation/page-with-select.html"));
NavigateFrameToURL(c_node, site_url);
RenderWidgetHostViewBase* rwhv_c_node =
static_cast<RenderWidgetHostViewBase*>(
c_node->current_frame_host()->GetRenderWidgetHost()->GetView());
EXPECT_NE(shell()->web_contents()->GetSiteInstance(),
c_node->current_frame_host()->GetSiteInstance());
scoped_refptr<ShowWidgetMessageFilter> filter = new ShowWidgetMessageFilter();
c_node->current_frame_host()->GetProcess()->AddFilter(filter.get());
// Target left-click event to child frame.
blink::WebMouseEvent click_event(
blink::WebInputEvent::kMouseDown, blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests());
click_event.button = blink::WebPointerProperties::Button::kLeft;
SetWebEventPositions(&click_event, gfx::Point(15, 15), rwhv_root);
click_event.click_count = 1;
rwhv_c_node->ProcessMouseEvent(click_event, ui::LatencyInfo());
// Prompt the WebContents to dismiss the popup by clicking elsewhere.
SetWebEventPositions(&click_event, gfx::Point(1, 1), rwhv_root);
rwhv_c_node->ProcessMouseEvent(click_event, ui::LatencyInfo());
filter->Wait();
gfx::Rect popup_rect = filter->last_initial_rect();
#if defined(OS_MACOSX)
EXPECT_EQ(popup_rect.x(), 9);
EXPECT_EQ(popup_rect.y(), 9);
#else
EXPECT_EQ(popup_rect.x() - rwhv_root->GetViewBounds().x(), 354);
EXPECT_EQ(popup_rect.y() - rwhv_root->GetViewBounds().y(), 154);
#endif
// Save the screen rect for b_node. Since it updates asynchronously from
// the script command that changes it, we need to wait for it to change
// before attempting to create the popup widget again.
gfx::Rect last_b_node_bounds_rect =
b_node->current_frame_host()->GetView()->GetViewBounds();
std::string script =
"var iframe = document.querySelector('iframe');"
"iframe.style.position = 'absolute';"
"iframe.style.left = 150;"
"iframe.style.top = 150;";
EXPECT_TRUE(ExecuteScript(root, script));
filter->Reset();
// Busy loop to wait for b_node's screen rect to get updated. There
// doesn't seem to be any better way to find out when this happens.
while (last_b_node_bounds_rect.x() ==
b_node->current_frame_host()->GetView()->GetViewBounds().x() &&
last_b_node_bounds_rect.y() ==
b_node->current_frame_host()->GetView()->GetViewBounds().y()) {
base::RunLoop run_loop;
base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, run_loop.QuitClosure(), TestTimeouts::tiny_timeout());
run_loop.Run();
}
click_event.button = blink::WebPointerProperties::Button::kLeft;
SetWebEventPositions(&click_event, gfx::Point(15, 15), rwhv_root);
click_event.click_count = 1;
rwhv_c_node->ProcessMouseEvent(click_event, ui::LatencyInfo());
SetWebEventPositions(&click_event, gfx::Point(1, 1), rwhv_root);
rwhv_c_node->ProcessMouseEvent(click_event, ui::LatencyInfo());
filter->Wait();
popup_rect = filter->last_initial_rect();
#if defined(OS_MACOSX)
EXPECT_EQ(popup_rect.x(), 9);
EXPECT_EQ(popup_rect.y(), 9);
#else
EXPECT_EQ(popup_rect.x() - rwhv_root->GetViewBounds().x(), 203);
EXPECT_EQ(popup_rect.y() - rwhv_root->GetViewBounds().y(), 248);
#endif
}
#if defined(USE_AURA)
class SitePerProcessGestureHitTestBrowserTest
: public SitePerProcessHitTestBrowserTest {
public:
SitePerProcessGestureHitTestBrowserTest() {}
  // This function simulates a sequence of events that is typical of a
// gesture pinch at |position|. We need this since machinery in the event
// codepath will require GesturePinch* to be enclosed in
// GestureScrollBegin/End, and since RenderWidgetHostInputEventRouter needs
// both the preceding touch events, as well as GestureTapDown, in order to
// correctly target the subsequent gesture event stream. The minimum stream
// required to trigger the correct behaviours is represented here, but could
// be expanded to include additional events such as one or more
// GestureScrollUpdate and GesturePinchUpdate events.
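  // In order, the events sent below are: TouchPressed, TouchReleased,
  // GestureTapDown, GestureScrollBegin, GesturePinchBegin, GesturePinchEnd
  // and GestureScrollEnd.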
void SendPinchBeginEndSequence(RenderWidgetHostViewAura* rwhva,
const gfx::Point& position,
RenderWidgetHost* expected_target_rwh) {
DCHECK(rwhva);
    // Use the full version of the constructor, with radius, angle and force,
    // since the renderer will crash otherwise.
ui::TouchEvent touch_pressed(
ui::ET_TOUCH_PRESSED, position, ui::EventTimeForNow(),
ui::PointerDetails(ui::EventPointerType::POINTER_TYPE_TOUCH,
/* pointer_id*/ 0,
/* radius_x */ 1.0f,
/* radius_y */ 1.0f,
/* force */ 1.0f));
UpdateEventRootLocation(&touch_pressed, rwhva);
InputEventAckWaiter waiter(expected_target_rwh,
blink::WebInputEvent::kTouchStart);
rwhva->OnTouchEvent(&touch_pressed);
waiter.Wait();
ui::TouchEvent touch_released(
ui::ET_TOUCH_RELEASED, position, ui::EventTimeForNow(),
ui::PointerDetails(ui::EventPointerType::POINTER_TYPE_TOUCH,
/* pointer_id*/ 0,
/* radius_x */ 1.0f,
/* radius_y */ 1.0f,
/* force */ 1.0f));
rwhva->OnTouchEvent(&touch_released);
ui::GestureEventDetails gesture_tap_down_details(ui::ET_GESTURE_TAP_DOWN);
gesture_tap_down_details.set_device_type(
ui::GestureDeviceType::DEVICE_TOUCHSCREEN);
ui::GestureEvent gesture_tap_down(
position.x(), position.y(), 0, ui::EventTimeForNow(),
gesture_tap_down_details, touch_pressed.unique_event_id());
UpdateEventRootLocation(&gesture_tap_down, rwhva);
rwhva->OnGestureEvent(&gesture_tap_down);
ui::GestureEventDetails gesture_scroll_begin_details(
ui::ET_GESTURE_SCROLL_BEGIN);
gesture_scroll_begin_details.set_device_type(
ui::GestureDeviceType::DEVICE_TOUCHSCREEN);
ui::GestureEvent gesture_scroll_begin(
position.x(), position.y(), 0, ui::EventTimeForNow(),
gesture_scroll_begin_details, touch_pressed.unique_event_id());
UpdateEventRootLocation(&gesture_scroll_begin, rwhva);
rwhva->OnGestureEvent(&gesture_scroll_begin);
ui::GestureEventDetails gesture_pinch_begin_details(
ui::ET_GESTURE_PINCH_BEGIN);
gesture_pinch_begin_details.set_device_type(
ui::GestureDeviceType::DEVICE_TOUCHSCREEN);
ui::GestureEvent gesture_pinch_begin(
position.x(), position.y(), 0, ui::EventTimeForNow(),
gesture_pinch_begin_details, touch_pressed.unique_event_id());
UpdateEventRootLocation(&gesture_pinch_begin, rwhva);
rwhva->OnGestureEvent(&gesture_pinch_begin);
ui::GestureEventDetails gesture_pinch_end_details(ui::ET_GESTURE_PINCH_END);
gesture_pinch_end_details.set_device_type(
ui::GestureDeviceType::DEVICE_TOUCHSCREEN);
ui::GestureEvent gesture_pinch_end(
position.x(), position.y(), 0, ui::EventTimeForNow(),
gesture_pinch_end_details, touch_pressed.unique_event_id());
UpdateEventRootLocation(&gesture_pinch_end, rwhva);
rwhva->OnGestureEvent(&gesture_pinch_end);
ui::GestureEventDetails gesture_scroll_end_details(
ui::ET_GESTURE_SCROLL_END);
gesture_scroll_end_details.set_device_type(
ui::GestureDeviceType::DEVICE_TOUCHSCREEN);
ui::GestureEvent gesture_scroll_end(
position.x(), position.y(), 0, ui::EventTimeForNow(),
gesture_scroll_end_details, touch_pressed.unique_event_id());
UpdateEventRootLocation(&gesture_scroll_end, rwhva);
rwhva->OnGestureEvent(&gesture_scroll_end);
}
void SetupRootAndChild() {
GURL main_url(embedded_test_server()->GetURL(
"a.com", "/cross_site_iframe_factory.html?a(b)"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
FrameTreeNode* root_node =
static_cast<WebContentsImpl*>(shell()->web_contents())
->GetFrameTree()
->root();
FrameTreeNode* child_node = root_node->child_at(0);
rwhv_child_ = static_cast<RenderWidgetHostViewBase*>(
child_node->current_frame_host()->GetRenderWidgetHost()->GetView());
rwhva_root_ = static_cast<RenderWidgetHostViewAura*>(
shell()->web_contents()->GetRenderWidgetHostView());
WaitForChildFrameSurfaceReady(child_node->current_frame_host());
rwhi_child_ = child_node->current_frame_host()->GetRenderWidgetHost();
rwhi_root_ = root_node->current_frame_host()->GetRenderWidgetHost();
}
protected:
RenderWidgetHostViewBase* rwhv_child_;
RenderWidgetHostViewAura* rwhva_root_;
RenderWidgetHostImpl* rwhi_child_;
RenderWidgetHostImpl* rwhi_root_;
private:
DISALLOW_COPY_AND_ASSIGN(SitePerProcessGestureHitTestBrowserTest);
};
IN_PROC_BROWSER_TEST_P(SitePerProcessGestureHitTestBrowserTest,
SubframeGesturePinchGoesToMainFrame) {
SetupRootAndChild();
TestInputEventObserver root_frame_monitor(rwhi_root_);
TestInputEventObserver child_frame_monitor(rwhi_child_);
// Need child rect in main frame coords.
gfx::Rect bounds = rwhv_child_->GetViewBounds();
bounds.Offset(gfx::Point() - rwhva_root_->GetViewBounds().origin());
SendPinchBeginEndSequence(rwhva_root_, bounds.CenterPoint(), rwhi_child_);
// Verify root-RWHI gets GSB/GPB/GPE/GSE.
EXPECT_TRUE(root_frame_monitor.EventWasReceived());
EXPECT_EQ(blink::WebInputEvent::kGestureScrollBegin,
root_frame_monitor.events_received()[0]);
EXPECT_EQ(blink::WebInputEvent::kGesturePinchBegin,
root_frame_monitor.events_received()[1]);
EXPECT_EQ(blink::WebInputEvent::kGesturePinchEnd,
root_frame_monitor.events_received()[2]);
EXPECT_EQ(blink::WebInputEvent::kGestureScrollEnd,
root_frame_monitor.events_received()[3]);
// Verify child-RWHI gets TS/TE, GTD/GSB/GSE.
EXPECT_TRUE(child_frame_monitor.EventWasReceived());
EXPECT_EQ(blink::WebInputEvent::kTouchStart,
child_frame_monitor.events_received()[0]);
EXPECT_EQ(blink::WebInputEvent::kTouchEnd,
child_frame_monitor.events_received()[1]);
EXPECT_EQ(blink::WebInputEvent::kGestureTapDown,
child_frame_monitor.events_received()[2]);
EXPECT_EQ(blink::WebInputEvent::kGestureScrollBegin,
child_frame_monitor.events_received()[3]);
EXPECT_EQ(blink::WebInputEvent::kGestureScrollEnd,
child_frame_monitor.events_received()[4]);
}
IN_PROC_BROWSER_TEST_P(SitePerProcessGestureHitTestBrowserTest,
MainframeGesturePinchGoesToMainFrame) {
SetupRootAndChild();
TestInputEventObserver root_frame_monitor(rwhi_root_);
TestInputEventObserver child_frame_monitor(rwhi_child_);
// Need child rect in main frame coords.
gfx::Rect bounds = rwhv_child_->GetViewBounds();
bounds.Offset(gfx::Point() - rwhva_root_->GetViewBounds().origin());
gfx::Point main_frame_point(bounds.origin());
main_frame_point += gfx::Vector2d(-5, -5);
SendPinchBeginEndSequence(rwhva_root_, main_frame_point, rwhi_root_);
// Verify root-RWHI gets TS/TE/GTD/GSB/GPB/GPE/GSE.
EXPECT_TRUE(root_frame_monitor.EventWasReceived());
EXPECT_EQ(blink::WebInputEvent::kTouchStart,
root_frame_monitor.events_received()[0]);
EXPECT_EQ(blink::WebInputEvent::kTouchEnd,
root_frame_monitor.events_received()[1]);
EXPECT_EQ(blink::WebInputEvent::kGestureTapDown,
root_frame_monitor.events_received()[2]);
EXPECT_EQ(blink::WebInputEvent::kGestureScrollBegin,
root_frame_monitor.events_received()[3]);
EXPECT_EQ(blink::WebInputEvent::kGesturePinchBegin,
root_frame_monitor.events_received()[4]);
EXPECT_EQ(blink::WebInputEvent::kGesturePinchEnd,
root_frame_monitor.events_received()[5]);
EXPECT_EQ(blink::WebInputEvent::kGestureScrollEnd,
root_frame_monitor.events_received()[6]);
// Verify child-RWHI gets no events.
EXPECT_FALSE(child_frame_monitor.EventWasReceived());
}
#endif // defined(USE_AURA)
// Test that MouseDown and MouseUp to the same coordinates do not result in
// different coordinates after routing. See bug https://crbug.com/670253.
#if defined(OS_ANDROID)
// Android uses a fixed scale factor, which makes this test unnecessary.
#define MAYBE_MouseClickWithNonIntegerScaleFactor \
DISABLED_MouseClickWithNonIntegerScaleFactor
#else
#define MAYBE_MouseClickWithNonIntegerScaleFactor \
MouseClickWithNonIntegerScaleFactor
#endif
IN_PROC_BROWSER_TEST_P(SitePerProcessNonIntegerScaleFactorHitTestBrowserTest,
MAYBE_MouseClickWithNonIntegerScaleFactor) {
GURL initial_url(embedded_test_server()->GetURL("a.com", "/title1.html"));
EXPECT_TRUE(NavigateToURL(shell(), initial_url));
FrameTreeNode* root = static_cast<WebContentsImpl*>(shell()->web_contents())
->GetFrameTree()
->root();
RenderWidgetHostViewBase* rwhv = static_cast<RenderWidgetHostViewBase*>(
root->current_frame_host()->GetRenderWidgetHost()->GetView());
RenderWidgetHostInputEventRouter* router =
static_cast<WebContentsImpl*>(shell()->web_contents())
->GetInputEventRouter();
// Create listener for input events.
RenderWidgetHostMouseEventMonitor event_monitor(
root->current_frame_host()->GetRenderWidgetHost());
blink::WebMouseEvent mouse_event(
blink::WebInputEvent::kMouseDown, blink::WebInputEvent::kNoModifiers,
blink::WebInputEvent::GetStaticTimeStampForTests());
mouse_event.button = blink::WebPointerProperties::Button::kLeft;
SetWebEventPositions(&mouse_event, gfx::Point(75, 75), rwhv);
mouse_event.click_count = 1;
event_monitor.ResetEventReceived();
router->RouteMouseEvent(rwhv, &mouse_event, ui::LatencyInfo());
EXPECT_TRUE(event_monitor.EventWasReceived());
gfx::Point mouse_down_coords =
gfx::Point(event_monitor.event().PositionInWidget().x,
event_monitor.event().PositionInWidget().y);
event_monitor.ResetEventReceived();
mouse_event.SetType(blink::WebInputEvent::kMouseUp);
SetWebEventPositions(&mouse_event, gfx::Point(75, 75), rwhv);
router->RouteMouseEvent(rwhv, &mouse_event, ui::LatencyInfo());
EXPECT_TRUE(event_monitor.EventWasReceived());
EXPECT_EQ(mouse_down_coords.x(), event_monitor.event().PositionInWidget().x);
// The transform from browser to renderer is (2, 35) in DIP. When we
// scale that to pixels, it's (3, 53). Note that 35 * 1.5 should be 52.5,
// so we already lost precision there in the transform from draw quad.
EXPECT_NEAR(mouse_down_coords.y(), event_monitor.event().PositionInWidget().y,
1);
}
IN_PROC_BROWSER_TEST_P(SitePerProcessNonIntegerScaleFactorHitTestBrowserTest,
NestedSurfaceHitTestTest) {
NestedSurfaceHitTestTestHelper(shell(), embedded_test_server());
}
// Verify InputTargetClient works within an OOPIF process.
IN_PROC_BROWSER_TEST_P(SitePerProcessHitTestBrowserTest, HitTestNestedFrames) {
GURL main_url(embedded_test_server()->GetURL(
"/frame_tree/page_with_positioned_nested_frames.html"));
EXPECT_TRUE(NavigateToURL(shell(), main_url));
FrameTreeNode* root = static_cast<WebContentsImpl*>(shell()->web_contents())
->GetFrameTree()
->root();
ASSERT_EQ(1U, root->child_count());
EXPECT_EQ(
" Site A ------------ proxies for B C\n"
" +--Site B ------- proxies for A C\n"
" +--Site C -- proxies for A B\n"
"Where A = http://127.0.0.1/\n"
" B = http://a.com/\n"
" C = http://baz.com/",
DepictFrameTree(root));
FrameTreeNode* child_node = root->child_at(0);
FrameTreeNode* grandchild_node = child_node->child_at(0);
RenderWidgetHostViewBase* rwhv_child = static_cast<RenderWidgetHostViewBase*>(
child_node->current_frame_host()->GetRenderWidgetHost()->GetView());
RenderWidgetHostViewBase* rwhv_grandchild =
static_cast<RenderWidgetHostViewBase*>(
grandchild_node->current_frame_host()
->GetRenderWidgetHost()
->GetView());
WaitForChildFrameSurfaceReady(grandchild_node->current_frame_host());
// Create two points to hit test: One in the child of the main frame, and
// one in the frame nested within that. The hit test request is sent to the
// child's renderer.
gfx::Point point_in_child(1, 1);
gfx::PointF point_in_nested_child(5, 5);
rwhv_grandchild->TransformPointToCoordSpaceForView(
point_in_nested_child, rwhv_child, &point_in_nested_child);
{
base::RunLoop run_loop;
viz::FrameSinkId received_frame_sink_id;
base::Closure quit_closure =
content::GetDeferredQuitTaskForRunLoop(&run_loop);
DCHECK_NE(child_node->current_frame_host()->GetInputTargetClient(),
nullptr);
child_node->current_frame_host()->GetInputTargetClient()->FrameSinkIdAt(
point_in_child,
base::BindLambdaForTesting([&](const viz::FrameSinkId& id) {
received_frame_sink_id = id;
quit_closure.Run();
}));
content::RunThisRunLoop(&run_loop);
// |point_in_child| should hit test to the view for |child_node|.
ASSERT_EQ(rwhv_child->GetFrameSinkId(), received_frame_sink_id);
}
{
base::RunLoop run_loop;
viz::FrameSinkId received_frame_sink_id;
base::Closure quit_closure =
content::GetDeferredQuitTaskForRunLoop(&run_loop);
DCHECK_NE(child_node->current_frame_host()->GetInputTargetClient(),
nullptr);
child_node->current_frame_host()->GetInputTargetClient()->FrameSinkIdAt(
gfx::ToCeiledPoint(point_in_nested_child),
base::BindLambdaForTesting([&](const viz::FrameSinkId& id) {
received_frame_sink_id = id;
quit_closure.Run();
}));
content::RunThisRunLoop(&run_loop);
// |point_in_nested_child| should hit test to |rwhv_grandchild|.
ASSERT_EQ(rwhv_grandchild->GetFrameSinkId(), received_frame_sink_id);
}
}
static const int kHitTestOption[] = {0, 1, 2};
static const float kOneScale[] = {1.f};
INSTANTIATE_TEST_CASE_P(/* no prefix */,
SitePerProcessHitTestBrowserTest,
testing::Combine(testing::ValuesIn(kHitTestOption),
testing::ValuesIn(kOneScale)));
// TODO(wjmaclean): Since the next two test fixtures only differ in DSF
// values, should we combine them into one using kMultiScale? This
// approach would make it more difficult to disable individual scales on
// particular platforms.
INSTANTIATE_TEST_CASE_P(/* no prefix */,
SitePerProcessHighDPIHitTestBrowserTest,
testing::Combine(testing::ValuesIn(kHitTestOption),
testing::ValuesIn(kOneScale)));
INSTANTIATE_TEST_CASE_P(/* no prefix */,
SitePerProcessNonIntegerScaleFactorHitTestBrowserTest,
testing::Combine(testing::ValuesIn(kHitTestOption),
testing::ValuesIn(kOneScale)));
#if defined(USE_AURA)
static const float kMultiScale[] = {1.f, 1.5f, 2.f};
INSTANTIATE_TEST_CASE_P(/* no prefix */,
SitePerProcessInternalsHitTestBrowserTest,
testing::Combine(testing::ValuesIn(kHitTestOption),
testing::ValuesIn(kMultiScale)));
INSTANTIATE_TEST_CASE_P(/* no prefix */,
SitePerProcessMouseWheelHitTestBrowserTest,
testing::Combine(testing::ValuesIn(kHitTestOption),
testing::ValuesIn(kOneScale)));
INSTANTIATE_TEST_CASE_P(
/* no prefix */,
SitePerProcessMouseWheelHitTestBrowserTestWheelScrollLatchingDisabled,
testing::Combine(testing::ValuesIn(kHitTestOption),
testing::ValuesIn(kOneScale)));
INSTANTIATE_TEST_CASE_P(/* no prefix */,
SitePerProcessGestureHitTestBrowserTest,
testing::Combine(testing::ValuesIn(kHitTestOption),
testing::ValuesIn(kOneScale)));
#endif
} // namespace content
| null | null | null | null | 19,044 |
13,197 | null |
train_val
|
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
| 178,192 |
linux
| 0 |
https://github.com/torvalds/linux
|
2017-05-12 08:32:58+10:00
|
#ifndef __ALPHA_WILDFIRE__H__
#define __ALPHA_WILDFIRE__H__
#include <linux/types.h>
#include <asm/compiler.h>
#define WILDFIRE_MAX_QBB 8 /* more than 8 requires other mods */
#define WILDFIRE_PCA_PER_QBB 4
#define WILDFIRE_IRQ_PER_PCA 64
#define WILDFIRE_NR_IRQS \
(WILDFIRE_MAX_QBB * WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA)
extern unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB];
extern unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB];
#define QBB_MAP_EMPTY 0xff
extern unsigned long wildfire_hard_qbb_mask;
extern unsigned long wildfire_soft_qbb_mask;
extern unsigned long wildfire_gp_mask;
extern unsigned long wildfire_hs_mask;
extern unsigned long wildfire_iop_mask;
extern unsigned long wildfire_ior_mask;
extern unsigned long wildfire_pca_mask;
extern unsigned long wildfire_cpu_mask;
extern unsigned long wildfire_mem_mask;
#define WILDFIRE_QBB_EXISTS(qbbno) (wildfire_soft_qbb_mask & (1 << (qbbno)))
#define WILDFIRE_MEM_EXISTS(qbbno) (wildfire_mem_mask & (0xf << ((qbbno) << 2)))
#define WILDFIRE_PCA_EXISTS(qbbno, pcano) \
(wildfire_pca_mask & (1 << (((qbbno) << 2) + (pcano))))
typedef struct {
volatile unsigned long csr __attribute__((aligned(64)));
} wildfire_64;
typedef struct {
volatile unsigned long csr __attribute__((aligned(256)));
} wildfire_256;
typedef struct {
volatile unsigned long csr __attribute__((aligned(2048)));
} wildfire_2k;
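/* Each wrapper above pads a single CSR out to 64, 256 or 2048 bytes via the
   aligned attribute, so consecutive members of the register-map structs that
   follow land at the hardware's register stride. */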
typedef struct {
wildfire_64 qsd_whami;
wildfire_64 qsd_rev;
wildfire_64 qsd_port_present;
wildfire_64 qsd_port_active;
wildfire_64 qsd_fault_ena;
wildfire_64 qsd_cpu_int_ena;
wildfire_64 qsd_mem_config;
wildfire_64 qsd_err_sum;
wildfire_64 ce_sum[4];
wildfire_64 dev_init[4];
wildfire_64 it_int[4];
wildfire_64 ip_int[4];
wildfire_64 uce_sum[4];
wildfire_64 se_sum__non_dev_int[4];
wildfire_64 scratch[4];
wildfire_64 qsd_timer;
wildfire_64 qsd_diag;
} wildfire_qsd;
typedef struct {
wildfire_256 qsd_whami;
wildfire_256 __pad1;
wildfire_256 ce_sum;
wildfire_256 dev_init;
wildfire_256 it_int;
wildfire_256 ip_int;
wildfire_256 uce_sum;
wildfire_256 se_sum;
} wildfire_fast_qsd;
typedef struct {
wildfire_2k qsa_qbb_id;
wildfire_2k __pad1;
wildfire_2k qsa_port_ena;
wildfire_2k qsa_scratch;
wildfire_2k qsa_config[5];
wildfire_2k qsa_ref_int;
wildfire_2k qsa_qbb_pop[2];
wildfire_2k qsa_dtag_fc;
wildfire_2k __pad2[3];
wildfire_2k qsa_diag;
wildfire_2k qsa_diag_lock[4];
wildfire_2k __pad3[11];
wildfire_2k qsa_cpu_err_sum;
wildfire_2k qsa_misc_err_sum;
wildfire_2k qsa_tmo_err_sum;
wildfire_2k qsa_err_ena;
wildfire_2k qsa_tmo_config;
wildfire_2k qsa_ill_cmd_err_sum;
wildfire_2k __pad4[26];
wildfire_2k qsa_busy_mask;
wildfire_2k qsa_arr_valid;
wildfire_2k __pad5[2];
wildfire_2k qsa_port_map[4];
wildfire_2k qsa_arr_addr[8];
wildfire_2k qsa_arr_mask[8];
} wildfire_qsa;
typedef struct {
wildfire_64 ioa_config;
wildfire_64 iod_config;
wildfire_64 iop_switch_credits;
wildfire_64 __pad1;
wildfire_64 iop_hose_credits;
wildfire_64 __pad2[11];
struct {
wildfire_64 __pad3;
wildfire_64 init;
} iop_hose[4];
wildfire_64 ioa_hose_0_ctrl;
wildfire_64 iod_hose_0_ctrl;
wildfire_64 ioa_hose_1_ctrl;
wildfire_64 iod_hose_1_ctrl;
wildfire_64 ioa_hose_2_ctrl;
wildfire_64 iod_hose_2_ctrl;
wildfire_64 ioa_hose_3_ctrl;
wildfire_64 iod_hose_3_ctrl;
struct {
wildfire_64 target;
wildfire_64 __pad4;
} iop_dev_int[4];
wildfire_64 iop_err_int_target;
wildfire_64 __pad5[7];
wildfire_64 iop_qbb_err_sum;
wildfire_64 __pad6;
wildfire_64 iop_qbb_se_sum;
wildfire_64 __pad7;
wildfire_64 ioa_err_sum;
wildfire_64 iod_err_sum;
wildfire_64 __pad8[4];
wildfire_64 ioa_diag_force_err;
wildfire_64 iod_diag_force_err;
wildfire_64 __pad9[4];
wildfire_64 iop_diag_send_err_int;
wildfire_64 __pad10[15];
wildfire_64 ioa_scratch;
wildfire_64 iod_scratch;
} wildfire_iop;
typedef struct {
wildfire_2k gpa_qbb_map[4];
wildfire_2k gpa_mem_pop_map;
wildfire_2k gpa_scratch;
wildfire_2k gpa_diag;
wildfire_2k gpa_config_0;
wildfire_2k __pad1;
wildfire_2k gpa_init_id;
wildfire_2k gpa_config_2;
/* not complete */
} wildfire_gp;
typedef struct {
wildfire_64 pca_what_am_i;
wildfire_64 pca_err_sum;
wildfire_64 pca_diag_force_err;
wildfire_64 pca_diag_send_err_int;
wildfire_64 pca_hose_credits;
wildfire_64 pca_scratch;
wildfire_64 pca_micro_addr;
wildfire_64 pca_micro_data;
wildfire_64 pca_pend_int;
wildfire_64 pca_sent_int;
wildfire_64 __pad1;
wildfire_64 pca_stdio_edge_level;
wildfire_64 __pad2[52];
struct {
wildfire_64 target;
wildfire_64 enable;
} pca_int[4];
wildfire_64 __pad3[56];
wildfire_64 pca_alt_sent_int[32];
} wildfire_pca;
typedef struct {
wildfire_64 ne_what_am_i;
/* not complete */
} wildfire_ne;
typedef struct {
wildfire_64 fe_what_am_i;
/* not complete */
} wildfire_fe;
typedef struct {
wildfire_64 pci_io_addr_ext;
wildfire_64 pci_ctrl;
wildfire_64 pci_err_sum;
wildfire_64 pci_err_addr;
wildfire_64 pci_stall_cnt;
wildfire_64 pci_iack_special;
wildfire_64 __pad1[2];
wildfire_64 pci_pend_int;
wildfire_64 pci_sent_int;
wildfire_64 __pad2[54];
struct {
wildfire_64 wbase;
wildfire_64 wmask;
wildfire_64 tbase;
} pci_window[4];
wildfire_64 pci_flush_tlb;
wildfire_64 pci_perf_mon;
} wildfire_pci;
#define WILDFIRE_ENTITY_SHIFT 18
#define WILDFIRE_GP_ENTITY (0x10UL << WILDFIRE_ENTITY_SHIFT)
#define WILDFIRE_IOP_ENTITY (0x08UL << WILDFIRE_ENTITY_SHIFT)
#define WILDFIRE_QSA_ENTITY (0x04UL << WILDFIRE_ENTITY_SHIFT)
#define WILDFIRE_QSD_ENTITY_SLOW (0x05UL << WILDFIRE_ENTITY_SHIFT)
#define WILDFIRE_QSD_ENTITY_FAST (0x01UL << WILDFIRE_ENTITY_SHIFT)
#define WILDFIRE_PCA_ENTITY(pca) ((0xc|(pca))<<WILDFIRE_ENTITY_SHIFT)
#define WILDFIRE_BASE (IDENT_ADDR | (1UL << 40))
#define WILDFIRE_QBB_MASK 0x0fUL /* for now, only 4 bits/16 QBBs */
#define WILDFIRE_QBB(q) ((~((long)(q)) & WILDFIRE_QBB_MASK) << 36)
#define WILDFIRE_HOSE(h) ((long)(h) << 33)
#define WILDFIRE_QBB_IO(q) (WILDFIRE_BASE | WILDFIRE_QBB(q))
#define WILDFIRE_QBB_HOSE(q,h) (WILDFIRE_QBB_IO(q) | WILDFIRE_HOSE(h))
#define WILDFIRE_MEM(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x000000000UL)
#define WILDFIRE_CONF(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x1FE000000UL)
#define WILDFIRE_IO(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x1FF000000UL)
#define WILDFIRE_qsd(q) \
((wildfire_qsd *)(WILDFIRE_QBB_IO(q)|WILDFIRE_QSD_ENTITY_SLOW|(((1UL<<13)-1)<<23)))
#define WILDFIRE_fast_qsd() \
((wildfire_fast_qsd *)(WILDFIRE_QBB_IO(0)|WILDFIRE_QSD_ENTITY_FAST|(((1UL<<13)-1)<<23)))
#define WILDFIRE_qsa(q) \
((wildfire_qsa *)(WILDFIRE_QBB_IO(q)|WILDFIRE_QSA_ENTITY|(((1UL<<13)-1)<<23)))
#define WILDFIRE_iop(q) \
((wildfire_iop *)(WILDFIRE_QBB_IO(q)|WILDFIRE_IOP_ENTITY|(((1UL<<13)-1)<<23)))
#define WILDFIRE_gp(q) \
((wildfire_gp *)(WILDFIRE_QBB_IO(q)|WILDFIRE_GP_ENTITY|(((1UL<<13)-1)<<23)))
#define WILDFIRE_pca(q,pca) \
((wildfire_pca *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23)))
#define WILDFIRE_ne(q,pca) \
((wildfire_ne *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23)|(1UL<<16)))
#define WILDFIRE_fe(q,pca) \
((wildfire_fe *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23)|(3UL<<15)))
#define WILDFIRE_pci(q,h) \
((wildfire_pci *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(((h)&6)>>1)|((((h)&1)|2)<<16)|(((1UL<<13)-1)<<23)))
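/* Each accessor above ORs together the per-QBB I/O base, an entity selector
   and the constant offset (((1UL<<13)-1)<<23) == 0xFFF800000; the ne/fe/pci
   variants add further sub-entity bits on top of that. */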
#define WILDFIRE_IO_BIAS WILDFIRE_IO(0,0)
#define WILDFIRE_MEM_BIAS WILDFIRE_MEM(0,0) /* ??? */
/* The IO address space is larger than 0xffff */
#define WILDFIRE_IO_SPACE (8UL*1024*1024)
#ifdef __KERNEL__
#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __IO_EXTERN_INLINE
#endif
/*
 * Memory functions. All accesses are done through linear space.
*/
__EXTERN_INLINE void __iomem *wildfire_ioportmap(unsigned long addr)
{
return (void __iomem *)(addr + WILDFIRE_IO_BIAS);
}
__EXTERN_INLINE void __iomem *wildfire_ioremap(unsigned long addr,
unsigned long size)
{
return (void __iomem *)(addr + WILDFIRE_MEM_BIAS);
}
__EXTERN_INLINE int wildfire_is_ioaddr(unsigned long addr)
{
return addr >= WILDFIRE_BASE;
}
__EXTERN_INLINE int wildfire_is_mmio(const volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long)xaddr;
return (addr & 0x100000000UL) == 0;
}
#undef __IO_PREFIX
#define __IO_PREFIX wildfire
#define wildfire_trivial_rw_bw 1
#define wildfire_trivial_rw_lq 1
#define wildfire_trivial_io_bw 1
#define wildfire_trivial_io_lq 1
#define wildfire_trivial_iounmap 1
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __IO_EXTERN_INLINE
#endif
#endif /* __KERNEL__ */
#endif /* __ALPHA_WILDFIRE__H__ */
| null | null | null | null | 86,539 |
63,753 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 63,753 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_UI_WEBUI_SNIPPETS_INTERNALS_UI_H_
#define CHROME_BROWSER_UI_WEBUI_SNIPPETS_INTERNALS_UI_H_
#include "base/macros.h"
#include "content/public/browser/web_ui_controller.h"
// The implementation for the chrome://snippets-internals page.
class SnippetsInternalsUI : public content::WebUIController {
public:
explicit SnippetsInternalsUI(content::WebUI* web_ui);
~SnippetsInternalsUI() override;
private:
DISALLOW_COPY_AND_ASSIGN(SnippetsInternalsUI);
};
#endif // CHROME_BROWSER_UI_WEBUI_SNIPPETS_INTERNALS_UI_H_
| null | null | null | null | 60,616 |
28,948 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 28,948 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
| null | null | null | null | 25,811 |
|
56,525 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 56,525 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_CHROMEOS_LOGIN_SCREENSHOT_TESTING_SCREENSHOT_TESTING_MIXIN_H_
#define CHROME_BROWSER_CHROMEOS_LOGIN_SCREENSHOT_TESTING_SCREENSHOT_TESTING_MIXIN_H_
#include <string>
#include "base/command_line.h"
#include "base/timer/timer.h"
#include "chrome/browser/chromeos/login/mixin_based_browser_test.h"
#include "chrome/browser/chromeos/login/screenshot_testing/screenshot_tester.h"
#include "content/public/test/browser_test_base.h"
namespace chromeos {
// Base mixin class for tests which support testing with screenshots.
// Sets up everything required for taking screenshots.
// Provides functionality to deal with animation load: screenshots
// should be taken only when all the animation is loaded.
class ScreenshotTestingMixin : public MixinBasedBrowserTest::Mixin {
public:
ScreenshotTestingMixin();
~ScreenshotTestingMixin() override;
// Override from BrowsertestBase::Mixin.
void SetUpInProcessBrowserTestFixture() override;
// Override from BrowsertestBase::Mixin.
void SetUpCommandLine(base::CommandLine* command_line) override;
// Runs screenshot testing if it is turned on by command line switches.
void RunScreenshotTesting(const std::string& test_name);
// Remembers that area |area| should be ignored during comparison.
void IgnoreArea(const SkIRect& area);
private:
// It turns out that it takes some more time for the animation
// to finish loading even after all the notifications have been sent.
  // That happens due to some properties of the compositor.
// This method should be used after getting all the necessary notifications
// to wait for the actual load of animation.
void SynchronizeAnimationLoadWithCompositor();
// This method exists only because of the current implementation of
// SynchronizeAnimationLoadWithCompositor.
void HandleAnimationLoad();
// Required for current implementation of
// SynchronizeAnimationLoadWithCompositor()
base::OneShotTimer timer_;
base::Closure animation_waiter_quitter_;
// Is true if testing with screenshots is turned on with all proper switches.
bool enable_test_screenshots_;
// |screenshot_tester_ | does everything connected with taking, loading and
// comparing screenshots
ScreenshotTester screenshot_tester_;
};
} // namespace chromeos
#endif // CHROME_BROWSER_CHROMEOS_LOGIN_SCREENSHOT_TESTING_SCREENSHOT_TESTING_MIXIN_H_
| null | null | null | null | 53,388 |
65,670 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 65,670 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_UI_PASSWORDS_PASSWORDS_MODEL_DELEGATE_MOCK_H_
#define CHROME_BROWSER_UI_PASSWORDS_PASSWORDS_MODEL_DELEGATE_MOCK_H_
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "chrome/browser/ui/passwords/passwords_model_delegate.h"
#include "testing/gmock/include/gmock/gmock.h"
class PasswordsModelDelegateMock
: public PasswordsModelDelegate,
      public base::SupportsWeakPtr<PasswordsModelDelegateMock> {
public:
PasswordsModelDelegateMock();
~PasswordsModelDelegateMock() override;
MOCK_CONST_METHOD0(GetWebContents, content::WebContents*());
MOCK_METHOD0(GetPasswordFormMetricsRecorder,
password_manager::PasswordFormMetricsRecorder*());
MOCK_CONST_METHOD0(GetOrigin, const GURL&());
MOCK_CONST_METHOD0(GetState, password_manager::ui::State());
MOCK_CONST_METHOD0(GetPendingPassword, const autofill::PasswordForm&());
MOCK_CONST_METHOD0(GetCredentialSource,
password_manager::metrics_util::CredentialSourceType());
MOCK_CONST_METHOD0(
GetCurrentForms,
const std::vector<std::unique_ptr<autofill::PasswordForm>>&());
MOCK_CONST_METHOD0(GetCurrentInteractionStats,
password_manager::InteractionsStats*());
MOCK_CONST_METHOD0(BubbleIsManualFallbackForSaving, bool());
MOCK_METHOD0(OnBubbleShown, void());
MOCK_METHOD0(OnBubbleHidden, void());
MOCK_METHOD0(OnNoInteraction, void());
MOCK_METHOD0(OnNopeUpdateClicked, void());
MOCK_METHOD0(NeverSavePassword, void());
MOCK_METHOD1(UpdatePassword, void(const autofill::PasswordForm&));
MOCK_METHOD2(SavePassword,
void(const base::string16&, const base::string16&));
MOCK_METHOD2(ChooseCredential, void(const autofill::PasswordForm&,
password_manager::CredentialType));
MOCK_METHOD0(NavigateToSmartLockPage, void());
MOCK_METHOD0(NavigateToSmartLockHelpPage, void());
MOCK_METHOD0(NavigateToPasswordManagerAccountDashboard, void());
MOCK_METHOD0(NavigateToPasswordManagerSettingsPage, void());
MOCK_METHOD1(EnableSync, void(const AccountInfo& account));
MOCK_METHOD0(OnDialogHidden, void());
MOCK_METHOD0(AuthenticateUser, bool());
MOCK_CONST_METHOD0(ArePasswordsRevealedWhenBubbleIsOpened, bool());
private:
DISALLOW_COPY_AND_ASSIGN(PasswordsModelDelegateMock);
};
#endif // CHROME_BROWSER_UI_PASSWORDS_PASSWORDS_MODEL_DELEGATE_MOCK_H_
| null | null | null | null | 62,533 |
35,055 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 35,055 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
/*
* Copyright (C) 2010 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef THIRD_PARTY_BLINK_RENDERER_CORE_HTML_PARSER_HTML_PARSER_IDIOMS_H_
#define THIRD_PARTY_BLINK_RENDERER_CORE_HTML_PARSER_HTML_PARSER_IDIOMS_H_
#include "third_party/blink/renderer/core/core_export.h"
#include "third_party/blink/renderer/core/dom/qualified_name.h"
#include "third_party/blink/renderer/platform/decimal.h"
#include "third_party/blink/renderer/platform/wtf/forward.h"
#include "third_party/blink/renderer/platform/wtf/text/wtf_string.h"
namespace WTF {
class TextEncoding;
}
namespace blink {
// Strip leading and trailing whitespace as defined by the HTML specification.
String StripLeadingAndTrailingHTMLSpaces(const String&);
// An implementation of the HTML specification's algorithm to convert a number
// to a string for number and range types.
String SerializeForNumberType(const Decimal&);
String SerializeForNumberType(double);
// Convert the specified string to a decimal/double. If the conversion fails,
// the return value is the fallback value, or NaN if no fallback is specified.
// Leading or trailing illegal characters cause failure, as does passing an
// empty string. The double* parameter may be 0 to check if the string can be
// parsed without getting the result.
Decimal ParseToDecimalForNumberType(
const String&,
const Decimal& fallback_value = Decimal::Nan());
CORE_EXPORT double ParseToDoubleForNumberType(
const String&,
double fallback_value = std::numeric_limits<double>::quiet_NaN());
// http://www.whatwg.org/specs/web-apps/current-work/#rules-for-parsing-integers
CORE_EXPORT bool ParseHTMLInteger(const String&, int&);
// http://www.whatwg.org/specs/web-apps/current-work/#rules-for-parsing-non-negative-integers
CORE_EXPORT bool ParseHTMLNonNegativeInteger(const String&, unsigned&);
// https://html.spec.whatwg.org/multipage/common-dom-interfaces.html#clamped-to-the-range
// without default value processing.
bool ParseHTMLClampedNonNegativeInteger(const String&,
unsigned min,
unsigned max,
unsigned&);
// https://html.spec.whatwg.org/multipage/infrastructure.html#rules-for-parsing-a-list-of-floating-point-numbers
CORE_EXPORT Vector<double> ParseHTMLListOfFloatingPointNumbers(const String&);
typedef Vector<std::pair<String, String>> HTMLAttributeList;
// The returned encoding might not be valid.
WTF::TextEncoding EncodingFromMetaAttributes(const HTMLAttributeList&);
// Space characters as defined by the HTML specification.
template <typename CharType>
inline bool IsHTMLSpace(CharType character) {
  // Histogram from Apple's page load test combined with some ad hoc browsing
  // of some other test suites.
//
// 82%: 216330 non-space characters, all > U+0020
// 11%: 30017 plain space characters, U+0020
// 5%: 12099 newline characters, U+000A
// 2%: 5346 tab characters, U+0009
//
// No other characters seen. No U+000C or U+000D, and no other control
// characters. Accordingly, we check for non-spaces first, then space, then
// newline, then tab, then the other characters.
return character <= ' ' &&
(character == ' ' || character == '\n' || character == '\t' ||
character == '\r' || character == '\f');
}
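// For example, IsHTMLSpace() is true for ' ', '\n', '\t', '\r' and '\f', and
// false for any other character, including U+00A0 (no-break space).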
template <typename CharType>
inline bool IsComma(CharType character) {
return character == ',';
}
template <typename CharType>
inline bool IsHTMLSpaceOrComma(CharType character) {
return IsComma(character) || IsHTMLSpace(character);
}
inline bool IsHTMLLineBreak(UChar character) {
return character <= '\r' && (character == '\n' || character == '\r');
}
template <typename CharType>
inline bool IsNotHTMLSpace(CharType character) {
return !IsHTMLSpace<CharType>(character);
}
bool ThreadSafeMatch(const QualifiedName&, const QualifiedName&);
bool ThreadSafeMatch(const String&, const QualifiedName&);
enum CharacterWidth { kLikely8Bit, kForce8Bit, kForce16Bit };
String AttemptStaticStringCreation(const LChar*, size_t);
String AttemptStaticStringCreation(const UChar*, size_t, CharacterWidth);
template <size_t inlineCapacity>
inline static String AttemptStaticStringCreation(
const Vector<UChar, inlineCapacity>& vector,
CharacterWidth width) {
return AttemptStaticStringCreation(vector.data(), vector.size(), width);
}
inline static String AttemptStaticStringCreation(const String str) {
if (!str.Is8Bit())
return AttemptStaticStringCreation(str.Characters16(), str.length(),
kForce16Bit);
return AttemptStaticStringCreation(str.Characters8(), str.length());
}
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_CORE_HTML_PARSER_HTML_PARSER_IDIOMS_H_
| null | null | null | null | 31,918 |
666 | null |
train_val
|
1b0d3845b454eaaac0b2064c78926ca4d739a080
| 263,234 |
qemu
| 0 |
https://github.com/bonzini/qemu
|
2016-10-18 11:40:27+01:00
|
/* from valgrind tests */
/* ================ sha1.c ================ */
/*
SHA-1 in C
By Steve Reid <steve@edmweb.com>
100% Public Domain
Test Vectors (from FIPS PUB 180-1)
"abc"
A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D
"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
84983E44 1C3BD26E BAAE4AA1 F95129E5 E54670F1
A million repetitions of "a"
34AA973C D4C4DAA4 F61EEB2B DBAD2731 6534016F
*/
/* #define LITTLE_ENDIAN * This should be #define'd already, if true. */
/* #define SHA1HANDSOFF * Copies data before messing with it. */
#define SHA1HANDSOFF
#include <stdio.h>
#include <string.h>
#include <stdint.h>
/* ================ sha1.h ================ */
/*
SHA-1 in C
By Steve Reid <steve@edmweb.com>
100% Public Domain
*/
typedef struct {
uint32_t state[5];
uint32_t count[2];
unsigned char buffer[64];
} SHA1_CTX;
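/* state[] holds the five 32-bit chaining words (160 bits of hash state),
   count[] is the number of message bits processed so far (count[0] low word,
   count[1] high word), and buffer[] holds a partial 64-byte input block. */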
void SHA1Transform(uint32_t state[5], const unsigned char buffer[64]);
void SHA1Init(SHA1_CTX* context);
void SHA1Update(SHA1_CTX* context, const unsigned char* data, uint32_t len);
void SHA1Final(unsigned char digest[20], SHA1_CTX* context);
/* ================ end of sha1.h ================ */
#include <endian.h>
#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
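/* Note: rol() is a 32-bit rotate-left; it is only used here with shift
   amounts in 1..31, so the (32 - bits) shift never becomes undefined. */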
/* blk0() and blk() perform the initial expand. */
/* I got the idea of expanding during the round function from SSLeay */
#if BYTE_ORDER == LITTLE_ENDIAN
#define blk0(i) (block->l[i] = (rol(block->l[i],24)&0xFF00FF00) \
|(rol(block->l[i],8)&0x00FF00FF))
#elif BYTE_ORDER == BIG_ENDIAN
#define blk0(i) block->l[i]
#else
#error "Endianness not defined!"
#endif
#define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] \
^block->l[(i+2)&15]^block->l[i&15],1))
/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
#define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(i)+0x5A827999+rol(v,5);w=rol(w,30);
#define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30);
#define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30);
#define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30);
#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30);
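/* The three boolean functions above are the standard SHA-1 round functions:
   (w&(x^y))^y is Ch(w,x,y) = (w&x)|(~w&y), w^x^y is Parity, and
   ((w|x)&y)|(w&x) is Maj(w,x,y) = (w&x)|(w&y)|(x&y). */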
/* Hash a single 512-bit block. This is the core of the algorithm. */
void SHA1Transform(uint32_t state[5], const unsigned char buffer[64])
{
uint32_t a, b, c, d, e;
typedef union {
unsigned char c[64];
uint32_t l[16];
} CHAR64LONG16;
#ifdef SHA1HANDSOFF
CHAR64LONG16 block[1]; /* use array to appear as a pointer */
memcpy(block, buffer, 64);
#else
/* The following had better never be used because it causes the
* pointer-to-const buffer to be cast into a pointer to non-const.
* And the result is written through. I threw a "const" in, hoping
* this will cause a diagnostic.
*/
CHAR64LONG16* block = (const CHAR64LONG16*)buffer;
#endif
/* Copy context->state[] to working vars */
a = state[0];
b = state[1];
c = state[2];
d = state[3];
e = state[4];
/* 4 rounds of 20 operations each. Loop unrolled. */
R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11);
R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15);
R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);
R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23);
R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27);
R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31);
R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35);
R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39);
R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43);
R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47);
R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51);
R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55);
R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59);
R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63);
R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67);
R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
/* Add the working vars back into context.state[] */
state[0] += a;
state[1] += b;
state[2] += c;
state[3] += d;
state[4] += e;
/* Wipe variables */
a = b = c = d = e = 0;
#ifdef SHA1HANDSOFF
memset(block, '\0', sizeof(block));
#endif
}
/* SHA1Init - Initialize new context */
void SHA1Init(SHA1_CTX* context)
{
/* SHA1 initialization constants */
context->state[0] = 0x67452301;
context->state[1] = 0xEFCDAB89;
context->state[2] = 0x98BADCFE;
context->state[3] = 0x10325476;
context->state[4] = 0xC3D2E1F0;
context->count[0] = context->count[1] = 0;
}
/* Run your data through this. */
void SHA1Update(SHA1_CTX* context, const unsigned char* data, uint32_t len)
{
uint32_t i;
uint32_t j;
j = context->count[0];
if ((context->count[0] += len << 3) < j)
context->count[1]++;
context->count[1] += (len>>29);
j = (j >> 3) & 63;
if ((j + len) > 63) {
memcpy(&context->buffer[j], data, (i = 64-j));
SHA1Transform(context->state, context->buffer);
for ( ; i + 63 < len; i += 64) {
SHA1Transform(context->state, &data[i]);
}
j = 0;
}
else i = 0;
memcpy(&context->buffer[j], &data[i], len - i);
}
/* Add padding and return the message digest. */
void SHA1Final(unsigned char digest[20], SHA1_CTX* context)
{
unsigned i;
unsigned char finalcount[8];
unsigned char c;
#if 0 /* untested "improvement" by DHR */
/* Convert context->count to a sequence of bytes
* in finalcount. Second element first, but
* big-endian order within element.
* But we do it all backwards.
*/
unsigned char *fcp = &finalcount[8];
for (i = 0; i < 2; i++)
{
uint32_t t = context->count[i];
int j;
for (j = 0; j < 4; t >>= 8, j++)
*--fcp = (unsigned char) t;
}
#else
for (i = 0; i < 8; i++) {
finalcount[i] = (unsigned char)((context->count[(i >= 4 ? 0 : 1)]
>> ((3-(i & 3)) * 8) ) & 255); /* Endian independent */
}
#endif
c = 0200;
SHA1Update(context, &c, 1);
while ((context->count[0] & 504) != 448) {
c = 0000;
SHA1Update(context, &c, 1);
}
SHA1Update(context, finalcount, 8); /* Should cause a SHA1Transform() */
for (i = 0; i < 20; i++) {
digest[i] = (unsigned char)
((context->state[i>>2] >> ((3-(i & 3)) * 8) ) & 255);
}
/* Wipe variables */
memset(context, '\0', sizeof(*context));
memset(&finalcount, '\0', sizeof(finalcount));
}
/* ================ end of sha1.c ================ */
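/*
 * Editorial addition, not part of Steve Reid's original sha1.c: a minimal
 * self-test showing how the API declared above is driven end to end. It
 * assumes <string.h> is available (the implementation above already relies
 * on memcpy()/memset()). The reference digest is the standard FIPS 180-1
 * test vector SHA1("abc") = a9993e364706816aba3e25717850c26c9cd0d89d.
 */
static int SHA1SelfTest(void)
{
    static const unsigned char expected[20] = {
        0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06, 0x81, 0x6a, 0xba, 0x3e,
        0x25, 0x71, 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0, 0xd8, 0x9d
    };
    SHA1_CTX ctx;
    unsigned char digest[20];

    SHA1Init(&ctx);
    SHA1Update(&ctx, (const unsigned char *)"abc", 3);
    SHA1Final(digest, &ctx);
    return memcmp(digest, expected, sizeof(digest)) == 0; /* 1 on success */
}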
#define BUFSIZE 4096
int
main(int argc, char **argv)
{
    SHA1_CTX ctx;
    unsigned char hash[20], buf[BUFSIZE];
    int i;

    /* Fill the buffer with a repeating 0x00..0xff byte pattern. */
    for (i = 0; i < BUFSIZE; i++)
        buf[i] = i;

    /* Hash 1000 * BUFSIZE bytes and print the digest as hex. */
    SHA1Init(&ctx);
    for (i = 0; i < 1000; i++)
        SHA1Update(&ctx, buf, BUFSIZE);
    SHA1Final(hash, &ctx);

    printf("SHA1=");
    for (i = 0; i < 20; i++)
        printf("%02x", hash[i]);
    printf("\n");
    return 0;
}
| null | null | null | null | 121,358 |
37,345 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 37,345 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/platform/loader/fetch/resource_request.h"
#include <memory>
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/public/platform/web_url_request.h"
#include "third_party/blink/renderer/platform/network/encoded_form_data.h"
#include "third_party/blink/renderer/platform/weborigin/kurl.h"
#include "third_party/blink/renderer/platform/weborigin/referrer.h"
#include "third_party/blink/renderer/platform/wtf/text/atomic_string.h"
namespace blink {
TEST(ResourceRequestTest, CrossThreadResourceRequestData) {
ResourceRequest original;
original.SetURL(KURL("http://www.example.com/test.htm"));
original.SetCacheMode(mojom::FetchCacheMode::kDefault);
original.SetTimeoutInterval(10);
original.SetSiteForCookies(KURL("http://www.example.com/first_party.htm"));
original.SetRequestorOrigin(
SecurityOrigin::Create(KURL("http://www.example.com/first_party.htm")));
original.SetHTTPMethod(HTTPNames::GET);
original.SetHTTPHeaderField(AtomicString("Foo"), AtomicString("Bar"));
original.SetHTTPHeaderField(AtomicString("Piyo"), AtomicString("Fuga"));
original.SetPriority(ResourceLoadPriority::kLow, 20);
scoped_refptr<EncodedFormData> original_body(
EncodedFormData::Create("Test Body"));
original.SetHTTPBody(original_body);
original.SetAllowStoredCredentials(false);
original.SetReportUploadProgress(false);
original.SetHasUserGesture(false);
original.SetDownloadToFile(false);
original.SetSkipServiceWorker(false);
original.SetFetchRequestMode(network::mojom::FetchRequestMode::kCORS);
original.SetFetchCredentialsMode(
network::mojom::FetchCredentialsMode::kSameOrigin);
original.SetRequestorID(30);
original.SetPluginChildID(40);
original.SetAppCacheHostID(50);
original.SetRequestContext(WebURLRequest::kRequestContextAudio);
original.SetFrameType(network::mojom::RequestContextFrameType::kNested);
original.SetHTTPReferrer(
Referrer("http://www.example.com/referrer.htm", kReferrerPolicyDefault));
EXPECT_STREQ("http://www.example.com/test.htm",
original.Url().GetString().Utf8().data());
EXPECT_EQ(mojom::FetchCacheMode::kDefault, original.GetCacheMode());
EXPECT_EQ(10, original.TimeoutInterval());
EXPECT_STREQ("http://www.example.com/first_party.htm",
original.SiteForCookies().GetString().Utf8().data());
EXPECT_STREQ("www.example.com",
original.RequestorOrigin()->Host().Utf8().data());
EXPECT_STREQ("GET", original.HttpMethod().Utf8().data());
EXPECT_STREQ("Bar", original.HttpHeaderFields().Get("Foo").Utf8().data());
EXPECT_STREQ("Fuga", original.HttpHeaderFields().Get("Piyo").Utf8().data());
EXPECT_EQ(ResourceLoadPriority::kLow, original.Priority());
EXPECT_STREQ("Test Body",
original.HttpBody()->FlattenToString().Utf8().data());
EXPECT_FALSE(original.AllowStoredCredentials());
EXPECT_FALSE(original.ReportUploadProgress());
EXPECT_FALSE(original.HasUserGesture());
EXPECT_FALSE(original.DownloadToFile());
EXPECT_FALSE(original.GetSkipServiceWorker());
EXPECT_EQ(network::mojom::FetchRequestMode::kCORS,
original.GetFetchRequestMode());
EXPECT_EQ(network::mojom::FetchCredentialsMode::kSameOrigin,
original.GetFetchCredentialsMode());
EXPECT_EQ(30, original.RequestorID());
EXPECT_EQ(40, original.GetPluginChildID());
EXPECT_EQ(50, original.AppCacheHostID());
EXPECT_EQ(WebURLRequest::kRequestContextAudio, original.GetRequestContext());
EXPECT_EQ(network::mojom::RequestContextFrameType::kNested,
original.GetFrameType());
EXPECT_STREQ("http://www.example.com/referrer.htm",
original.HttpReferrer().Utf8().data());
EXPECT_EQ(kReferrerPolicyDefault, original.GetReferrerPolicy());
std::unique_ptr<CrossThreadResourceRequestData> data1(original.CopyData());
ResourceRequest copy1(data1.get());
EXPECT_STREQ("http://www.example.com/test.htm",
copy1.Url().GetString().Utf8().data());
EXPECT_EQ(mojom::FetchCacheMode::kDefault, copy1.GetCacheMode());
EXPECT_EQ(10, copy1.TimeoutInterval());
EXPECT_STREQ("http://www.example.com/first_party.htm",
copy1.SiteForCookies().GetString().Utf8().data());
EXPECT_STREQ("www.example.com",
copy1.RequestorOrigin()->Host().Utf8().data());
EXPECT_STREQ("GET", copy1.HttpMethod().Utf8().data());
EXPECT_STREQ("Bar", copy1.HttpHeaderFields().Get("Foo").Utf8().data());
EXPECT_EQ(ResourceLoadPriority::kLow, copy1.Priority());
EXPECT_STREQ("Test Body", copy1.HttpBody()->FlattenToString().Utf8().data());
EXPECT_FALSE(copy1.AllowStoredCredentials());
EXPECT_FALSE(copy1.ReportUploadProgress());
EXPECT_FALSE(copy1.HasUserGesture());
EXPECT_FALSE(copy1.DownloadToFile());
EXPECT_FALSE(copy1.GetSkipServiceWorker());
EXPECT_EQ(network::mojom::FetchRequestMode::kCORS,
copy1.GetFetchRequestMode());
EXPECT_EQ(network::mojom::FetchCredentialsMode::kSameOrigin,
copy1.GetFetchCredentialsMode());
EXPECT_EQ(30, copy1.RequestorID());
EXPECT_EQ(40, copy1.GetPluginChildID());
EXPECT_EQ(50, copy1.AppCacheHostID());
EXPECT_EQ(WebURLRequest::kRequestContextAudio, copy1.GetRequestContext());
EXPECT_EQ(network::mojom::RequestContextFrameType::kNested,
copy1.GetFrameType());
EXPECT_STREQ("http://www.example.com/referrer.htm",
copy1.HttpReferrer().Utf8().data());
EXPECT_EQ(kReferrerPolicyDefault, copy1.GetReferrerPolicy());
copy1.SetAllowStoredCredentials(true);
copy1.SetReportUploadProgress(true);
copy1.SetHasUserGesture(true);
copy1.SetDownloadToFile(true);
copy1.SetSkipServiceWorker(true);
copy1.SetFetchRequestMode(network::mojom::FetchRequestMode::kNoCORS);
copy1.SetFetchCredentialsMode(network::mojom::FetchCredentialsMode::kInclude);
std::unique_ptr<CrossThreadResourceRequestData> data2(copy1.CopyData());
ResourceRequest copy2(data2.get());
EXPECT_TRUE(copy2.AllowStoredCredentials());
EXPECT_TRUE(copy2.ReportUploadProgress());
EXPECT_TRUE(copy2.HasUserGesture());
EXPECT_TRUE(copy2.DownloadToFile());
EXPECT_TRUE(copy2.GetSkipServiceWorker());
  EXPECT_EQ(network::mojom::FetchRequestMode::kNoCORS,
            copy2.GetFetchRequestMode());
  EXPECT_EQ(network::mojom::FetchCredentialsMode::kInclude,
            copy2.GetFetchCredentialsMode());
}
TEST(ResourceRequestTest, SetHasUserGesture) {
ResourceRequest original;
EXPECT_FALSE(original.HasUserGesture());
original.SetHasUserGesture(true);
EXPECT_TRUE(original.HasUserGesture());
original.SetHasUserGesture(false);
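  // The user-gesture bit is sticky: as the assertion below shows, once it has
  // been set to true it cannot be cleared again.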
EXPECT_TRUE(original.HasUserGesture());
}
TEST(ResourceRequestTest, SetIsAdResource) {
ResourceRequest original;
EXPECT_FALSE(original.IsAdResource());
original.SetIsAdResource();
EXPECT_TRUE(original.IsAdResource());
// Should persist across redirects.
std::unique_ptr<ResourceRequest> redirect_request =
original.CreateRedirectRequest(
KURL("https://example.test/redirect"), original.HttpMethod(),
original.SiteForCookies(), original.HttpReferrer(),
original.GetReferrerPolicy(), original.GetSkipServiceWorker());
EXPECT_TRUE(redirect_request->IsAdResource());
}
} // namespace blink
| null | null | null | null | 34,208 |
13,327 | null |
train_val
|
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
| 178,322 |
linux
| 0 |
https://github.com/torvalds/linux
|
2017-05-12 08:32:58+10:00
|
/*
* linux/arch/m68k/tools/amiga/dmesg.c -- Retrieve the kernel messages stored
* in Chip RAM with the kernel command
* line option `debug=mem'.
*
* © Copyright 1996 by Geert Uytterhoeven <geert@linux-m68k.org>
*
*
* Usage:
*
* dmesg
* dmesg <CHIPMEM_END>
*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of the Linux
* distribution for more details.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#define CHIPMEM_START 0x00000000
#define CHIPMEM_END 0x00200000 /* overridden by argv[1] */
#define SAVEKMSG_MAGIC1 0x53415645 /* 'SAVE' */
#define SAVEKMSG_MAGIC2 0x4B4D5347 /* 'KMSG' */
struct savekmsg {
u_long magic1; /* SAVEKMSG_MAGIC1 */
u_long magic2; /* SAVEKMSG_MAGIC2 */
u_long magicptr; /* address of magic1 */
u_long size;
char data[0];
};
int main(int argc, char *argv[])
{
u_long start = CHIPMEM_START, end = CHIPMEM_END, p;
int found = 0;
struct savekmsg *m = NULL;
if (argc >= 2)
end = strtoul(argv[1], NULL, 0);
printf("Searching for SAVEKMSG magic...\n");
for (p = start; p <= end-sizeof(struct savekmsg); p += 4) {
m = (struct savekmsg *)p;
if ((m->magic1 == SAVEKMSG_MAGIC1) && (m->magic2 == SAVEKMSG_MAGIC2) &&
(m->magicptr == p)) {
found = 1;
break;
}
}
if (!found)
printf("Not found\n");
else {
printf("Found %ld bytes at 0x%08lx\n", m->size, (u_long)&m->data);
puts(">>>>>>>>>>>>>>>>>>>>");
fflush(stdout);
write(1, &m->data, m->size);
fflush(stdout);
puts("<<<<<<<<<<<<<<<<<<<<");
}
return(0);
}
| null | null | null | null | 86,669 |
20,535 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 20,535 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_TEST_MOCK_RENDER_PROCESS_H_
#define CONTENT_TEST_MOCK_RENDER_PROCESS_H_
#include "base/macros.h"
#include "content/renderer/render_process.h"
namespace content {
// This class is a mock of the child process singleton, used while running the
// RenderView unit tests.
class MockRenderProcess : public RenderProcess {
public:
MockRenderProcess();
~MockRenderProcess() override;
// RenderProcess implementation.
void AddBindings(int bindings) override;
int GetEnabledBindings() const override;
private:
int enabled_bindings_;
DISALLOW_COPY_AND_ASSIGN(MockRenderProcess);
};
} // namespace content
#endif // CONTENT_TEST_MOCK_RENDER_PROCESS_H_
| null | null | null | null | 17,398 |
43,493 | null |
train_val
|
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
| 208,488 |
linux
| 0 |
https://github.com/torvalds/linux
|
2017-05-12 08:32:58+10:00
|
/*
* linux/include/linux/edd.h
* Copyright (C) 2002, 2003, 2004 Dell Inc.
* by Matt Domsch <Matt_Domsch@dell.com>
*
* structures and definitions for the int 13h, ax={41,48}h
* BIOS Enhanced Disk Drive Services
* This is based on the T13 group document D1572 Revision 0 (August 14 2002)
* available at http://www.t13.org/docs2002/d1572r0.pdf. It is
* very similar to D1484 Revision 3 http://www.t13.org/docs2002/d1484r3.pdf
*
* In a nutshell, arch/{i386,x86_64}/boot/setup.S populates a scratch
* table in the boot_params that contains a list of BIOS-enumerated
* boot devices.
* In arch/{i386,x86_64}/kernel/setup.c, this information is
* transferred into the edd structure, and in drivers/firmware/edd.c, that
* information is used to identify BIOS boot disk. The code in setup.S
* is very sensitive to the size of these structures.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License v2.0 as published by
* the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _UAPI_LINUX_EDD_H
#define _UAPI_LINUX_EDD_H
#include <linux/types.h>
#define EDDNR 0x1e9 /* addr of number of edd_info structs at EDDBUF
in boot_params - treat this as 1 byte */
#define EDDBUF 0xd00 /* addr of edd_info structs in boot_params */
#define EDDMAXNR 6 /* number of edd_info structs starting at EDDBUF */
#define EDDEXTSIZE 8 /* change these if you muck with the structures */
#define EDDPARMSIZE 74
#define CHECKEXTENSIONSPRESENT 0x41
#define GETDEVICEPARAMETERS 0x48
#define LEGACYGETDEVICEPARAMETERS 0x08
#define EDDMAGIC1 0x55AA
#define EDDMAGIC2 0xAA55
#define READ_SECTORS 0x02 /* int13 AH=0x02 is READ_SECTORS command */
#define EDD_MBR_SIG_OFFSET 0x1B8 /* offset of signature in the MBR */
#define EDD_MBR_SIG_BUF 0x290 /* addr in boot params */
#define EDD_MBR_SIG_MAX 16 /* max number of signatures to store */
#define EDD_MBR_SIG_NR_BUF 0x1ea /* addr of number of MBR signatures at EDD_MBR_SIG_BUF
in boot_params - treat this as 1 byte */
#ifndef __ASSEMBLY__
#define EDD_EXT_FIXED_DISK_ACCESS (1 << 0)
#define EDD_EXT_DEVICE_LOCKING_AND_EJECTING (1 << 1)
#define EDD_EXT_ENHANCED_DISK_DRIVE_SUPPORT (1 << 2)
#define EDD_EXT_64BIT_EXTENSIONS (1 << 3)
#define EDD_INFO_DMA_BOUNDARY_ERROR_TRANSPARENT (1 << 0)
#define EDD_INFO_GEOMETRY_VALID (1 << 1)
#define EDD_INFO_REMOVABLE (1 << 2)
#define EDD_INFO_WRITE_VERIFY (1 << 3)
#define EDD_INFO_MEDIA_CHANGE_NOTIFICATION (1 << 4)
#define EDD_INFO_LOCKABLE (1 << 5)
#define EDD_INFO_NO_MEDIA_PRESENT (1 << 6)
#define EDD_INFO_USE_INT13_FN50 (1 << 7)
struct edd_device_params {
__u16 length;
__u16 info_flags;
__u32 num_default_cylinders;
__u32 num_default_heads;
__u32 sectors_per_track;
__u64 number_of_sectors;
__u16 bytes_per_sector;
__u32 dpte_ptr; /* 0xFFFFFFFF for our purposes */
__u16 key; /* = 0xBEDD */
__u8 device_path_info_length; /* = 44 */
__u8 reserved2;
__u16 reserved3;
__u8 host_bus_type[4];
__u8 interface_type[8];
union {
struct {
__u16 base_address;
__u16 reserved1;
__u32 reserved2;
} __attribute__ ((packed)) isa;
struct {
__u8 bus;
__u8 slot;
__u8 function;
__u8 channel;
__u32 reserved;
} __attribute__ ((packed)) pci;
/* pcix is same as pci */
struct {
__u64 reserved;
} __attribute__ ((packed)) ibnd;
struct {
__u64 reserved;
} __attribute__ ((packed)) xprs;
struct {
__u64 reserved;
} __attribute__ ((packed)) htpt;
struct {
__u64 reserved;
} __attribute__ ((packed)) unknown;
} interface_path;
union {
struct {
__u8 device;
__u8 reserved1;
__u16 reserved2;
__u32 reserved3;
__u64 reserved4;
} __attribute__ ((packed)) ata;
struct {
__u8 device;
__u8 lun;
__u8 reserved1;
__u8 reserved2;
__u32 reserved3;
__u64 reserved4;
} __attribute__ ((packed)) atapi;
struct {
__u16 id;
__u64 lun;
__u16 reserved1;
__u32 reserved2;
} __attribute__ ((packed)) scsi;
struct {
__u64 serial_number;
__u64 reserved;
} __attribute__ ((packed)) usb;
struct {
__u64 eui;
__u64 reserved;
} __attribute__ ((packed)) i1394;
struct {
__u64 wwid;
__u64 lun;
} __attribute__ ((packed)) fibre;
struct {
__u64 identity_tag;
__u64 reserved;
} __attribute__ ((packed)) i2o;
struct {
__u32 array_number;
__u32 reserved1;
__u64 reserved2;
} __attribute__ ((packed)) raid;
struct {
__u8 device;
__u8 reserved1;
__u16 reserved2;
__u32 reserved3;
__u64 reserved4;
} __attribute__ ((packed)) sata;
struct {
__u64 reserved1;
__u64 reserved2;
} __attribute__ ((packed)) unknown;
} device_path;
__u8 reserved4;
__u8 checksum;
} __attribute__ ((packed));
struct edd_info {
__u8 device;
__u8 version;
__u16 interface_support;
__u16 legacy_max_cylinder;
__u8 legacy_max_head;
__u8 legacy_sectors_per_track;
struct edd_device_params params;
} __attribute__ ((packed));
struct edd {
unsigned int mbr_signature[EDD_MBR_SIG_MAX];
struct edd_info edd_info[EDDMAXNR];
unsigned char mbr_signature_nr;
unsigned char edd_info_nr;
};
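/*
 * Editorial sketch (not part of the original header): edd_info_nr and
 * mbr_signature_nr bound the fixed-size arrays above, so a consumer of a
 * populated struct edd would typically clamp before iterating, along the
 * lines of this hypothetical helper:
 */
static inline unsigned char edd_info_count(const struct edd *e)
{
	return e->edd_info_nr < EDDMAXNR ? e->edd_info_nr : EDDMAXNR;
}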
#endif /*!__ASSEMBLY__ */
#endif /* _UAPI_LINUX_EDD_H */
| null | null | null | null | 116,835 |
11,314 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 11,314 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
namespace gpu {
GpuMemoryBufferManager::GpuMemoryBufferManager() = default;
GpuMemoryBufferManager::~GpuMemoryBufferManager() = default;
} // namespace gpu
| null | null | null | null | 8,177 |
6,751 | null |
train_val
|
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
| 171,746 |
linux
| 0 |
https://github.com/torvalds/linux
|
2017-05-12 08:32:58+10:00
|
/*
* EBCDIC -> ASCII, ASCII -> EBCDIC conversion routines.
*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef _EBCDIC_H
#define _EBCDIC_H
#ifndef _S390_TYPES_H
#include <types.h>
#endif
extern __u8 _ascebc_500[256]; /* ASCII -> EBCDIC 500 conversion table */
extern __u8 _ebcasc_500[256]; /* EBCDIC 500 -> ASCII conversion table */
extern __u8 _ascebc[256]; /* ASCII -> EBCDIC conversion table */
extern __u8 _ebcasc[256]; /* EBCDIC -> ASCII conversion table */
extern __u8 _ebc_tolower[256]; /* EBCDIC -> lowercase */
extern __u8 _ebc_toupper[256]; /* EBCDIC -> uppercase */
static inline void
codepage_convert(const __u8 *codepage, volatile __u8 * addr, unsigned long nr)
{
if (nr-- <= 0)
return;
asm volatile(
" bras 1,1f\n"
" tr 0(1,%0),0(%2)\n"
"0: tr 0(256,%0),0(%2)\n"
" la %0,256(%0)\n"
"1: ahi %1,-256\n"
" jnm 0b\n"
" ex %1,0(1)"
: "+&a" (addr), "+&a" (nr)
: "a" (codepage) : "cc", "memory", "1");
}
#define ASCEBC(addr,nr) codepage_convert(_ascebc, addr, nr)
#define EBCASC(addr,nr) codepage_convert(_ebcasc, addr, nr)
#define ASCEBC_500(addr,nr) codepage_convert(_ascebc_500, addr, nr)
#define EBCASC_500(addr,nr) codepage_convert(_ebcasc_500, addr, nr)
#define EBC_TOLOWER(addr,nr) codepage_convert(_ebc_tolower, addr, nr)
#define EBC_TOUPPER(addr,nr) codepage_convert(_ebc_toupper, addr, nr)
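/*
 * Editorial example (not part of the original header): the macros convert a
 * buffer in place, for instance
 *
 *	char greeting[] = "HELLO";
 *	ASCEBC((__u8 *)greeting, 5);	converts the bytes to EBCDIC
 *	EBCASC((__u8 *)greeting, 5);	converts them back to ASCII
 */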
#endif
| null | null | null | null | 80,093 |
38,404 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 38,404 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
/*
* Copyright (C) 2012 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "third_party/blink/renderer/platform/instrumentation/platform_instrumentation.h"
namespace blink {
const char PlatformInstrumentation::kCategoryName[] =
TRACE_DISABLED_BY_DEFAULT("devtools.timeline");
const char PlatformInstrumentation::kImageDecodeEvent[] = "Decode Image";
const char PlatformInstrumentation::kImageResizeEvent[] = "Resize Image";
const char PlatformInstrumentation::kDrawLazyPixelRefEvent[] =
"Draw LazyPixelRef";
const char PlatformInstrumentation::kDecodeLazyPixelRefEvent[] =
"Decode LazyPixelRef";
const char PlatformInstrumentation::kLazyPixelRef[] = "LazyPixelRef";
const char PlatformInstrumentation::kImageTypeArgument[] = "imageType";
const char PlatformInstrumentation::kCachedArgument[] = "cached";
} // namespace blink
| null | null | null | null | 35,267 |
32,037 | null |
train_val
|
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
| 197,032 |
linux
| 0 |
https://github.com/torvalds/linux
|
2017-05-12 08:32:58+10:00
|
/*
* EISA "eeprom" support routines
*
* Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/eisa_eeprom.h>
#define EISA_EEPROM_MINOR 241
static loff_t eisa_eeprom_llseek(struct file *file, loff_t offset, int origin)
{
return fixed_size_llseek(file, offset, origin, HPEE_MAX_LENGTH);
}
static ssize_t eisa_eeprom_read(struct file * file,
char __user *buf, size_t count, loff_t *ppos )
{
unsigned char *tmp;
ssize_t ret;
int i;
if (*ppos < 0 || *ppos >= HPEE_MAX_LENGTH)
return 0;
count = *ppos + count < HPEE_MAX_LENGTH ? count : HPEE_MAX_LENGTH - *ppos;
tmp = kmalloc(count, GFP_KERNEL);
if (tmp) {
for (i = 0; i < count; i++)
tmp[i] = readb(eisa_eeprom_addr+(*ppos)++);
if (copy_to_user (buf, tmp, count))
ret = -EFAULT;
else
ret = count;
kfree (tmp);
} else
ret = -ENOMEM;
return ret;
}
static int eisa_eeprom_open(struct inode *inode, struct file *file)
{
if (file->f_mode & FMODE_WRITE)
return -EINVAL;
return 0;
}
static int eisa_eeprom_release(struct inode *inode, struct file *file)
{
return 0;
}
/*
* The various file operations we support.
*/
static const struct file_operations eisa_eeprom_fops = {
.owner = THIS_MODULE,
.llseek = eisa_eeprom_llseek,
.read = eisa_eeprom_read,
.open = eisa_eeprom_open,
.release = eisa_eeprom_release,
};
static struct miscdevice eisa_eeprom_dev = {
EISA_EEPROM_MINOR,
"eisa_eeprom",
&eisa_eeprom_fops
};
static int __init eisa_eeprom_init(void)
{
int retval;
if (!eisa_eeprom_addr)
return -ENODEV;
retval = misc_register(&eisa_eeprom_dev);
if (retval < 0) {
printk(KERN_ERR "EISA EEPROM: cannot register misc device.\n");
return retval;
}
printk(KERN_INFO "EISA EEPROM at 0x%p\n", eisa_eeprom_addr);
return 0;
}
MODULE_LICENSE("GPL");
module_init(eisa_eeprom_init);
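/*
 * Editorial usage note (not part of the original driver): the device
 * registers as misc minor 241 with name "eisa_eeprom", so with the usual
 * /dev layout its contents can be dumped read-only with something like
 *
 *	dd if=/dev/eisa_eeprom of=eeprom.bin
 *
 * Opens for writing are rejected in eisa_eeprom_open().
 */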
| null | null | null | null | 105,379 |
67,984 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 67,984 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef REMOTING_PROTOCOL_AUDIO_READER_H_
#define REMOTING_PROTOCOL_AUDIO_READER_H_
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "remoting/protocol/channel_dispatcher_base.h"
namespace remoting {
namespace protocol {
class AudioStub;
class AudioReader : public ChannelDispatcherBase {
public:
explicit AudioReader(AudioStub* audio_stub);
~AudioReader() override;
private:
void OnIncomingMessage(std::unique_ptr<CompoundBuffer> message) override;
AudioStub* audio_stub_;
DISALLOW_COPY_AND_ASSIGN(AudioReader);
};
} // namespace protocol
} // namespace remoting
#endif // REMOTING_PROTOCOL_AUDIO_READER_H_
| null | null | null | null | 64,847 |
17,060 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 17,060 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_VIZ_TEST_TEST_DISPLAY_PROVIDER_H_
#define COMPONENTS_VIZ_TEST_TEST_DISPLAY_PROVIDER_H_
#include <memory>
#include "components/viz/service/display/display.h"
#include "components/viz/service/display_embedder/display_provider.h"
#include "components/viz/test/test_shared_bitmap_manager.h"
namespace viz {
// Test implementation that creates a Display with a FakeOutputSurface.
class TestDisplayProvider : public DisplayProvider {
public:
TestDisplayProvider();
~TestDisplayProvider() override;
// DisplayProvider implementation.
std::unique_ptr<Display> CreateDisplay(
const FrameSinkId& frame_sink_id,
gpu::SurfaceHandle surface_handle,
bool gpu_compositing,
ExternalBeginFrameControllerImpl* external_begin_frame_controller,
const RendererSettings& renderer_settings,
std::unique_ptr<SyntheticBeginFrameSource>* out_begin_frame_source)
override;
private:
TestSharedBitmapManager shared_bitmap_manager_;
DISALLOW_COPY_AND_ASSIGN(TestDisplayProvider);
};
} // namespace viz
#endif // COMPONENTS_VIZ_TEST_TEST_DISPLAY_PROVIDER_H_
| null | null | null | null | 13,923 |
34,758 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 34,758 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/core/html/forms/html_text_area_element.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/renderer/platform/wtf/text/string_builder.h"
namespace blink {
TEST(HTMLTextAreaElementTest, SanitizeUserInputValue) {
UChar kLeadSurrogate = 0xD800;
EXPECT_EQ("", HTMLTextAreaElement::SanitizeUserInputValue("", 0));
EXPECT_EQ("", HTMLTextAreaElement::SanitizeUserInputValue("a", 0));
EXPECT_EQ("", HTMLTextAreaElement::SanitizeUserInputValue("\n", 0));
StringBuilder builder;
builder.Append(kLeadSurrogate);
String lead_surrogate = builder.ToString();
EXPECT_EQ("", HTMLTextAreaElement::SanitizeUserInputValue(lead_surrogate, 0));
EXPECT_EQ("", HTMLTextAreaElement::SanitizeUserInputValue("", 1));
EXPECT_EQ("", HTMLTextAreaElement::SanitizeUserInputValue(lead_surrogate, 1));
EXPECT_EQ("a", HTMLTextAreaElement::SanitizeUserInputValue("a", 1));
EXPECT_EQ("\n", HTMLTextAreaElement::SanitizeUserInputValue("\n", 1));
EXPECT_EQ("\n", HTMLTextAreaElement::SanitizeUserInputValue("\n", 2));
EXPECT_EQ("abc", HTMLTextAreaElement::SanitizeUserInputValue(
String("abc") + lead_surrogate, 4));
EXPECT_EQ("a\ncd", HTMLTextAreaElement::SanitizeUserInputValue("a\ncdef", 4));
EXPECT_EQ("a\rcd", HTMLTextAreaElement::SanitizeUserInputValue("a\rcdef", 4));
EXPECT_EQ("a\r\ncd",
HTMLTextAreaElement::SanitizeUserInputValue("a\r\ncdef", 4));
}
} // namespace blink
| null | null | null | null | 31,621 |
52,819 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 52,819 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/base/time_delta_interpolator.h"
#include <stdint.h>
#include <algorithm>
#include "base/logging.h"
#include "base/time/tick_clock.h"
#include "media/base/timestamp_constants.h"
namespace media {
TimeDeltaInterpolator::TimeDeltaInterpolator(const base::TickClock* tick_clock)
: tick_clock_(tick_clock),
interpolating_(false),
upper_bound_(kNoTimestamp),
playback_rate_(0) {
DCHECK(tick_clock_);
}
TimeDeltaInterpolator::~TimeDeltaInterpolator() = default;
base::TimeDelta TimeDeltaInterpolator::StartInterpolating() {
DCHECK(!interpolating_);
reference_ = tick_clock_->NowTicks();
interpolating_ = true;
return lower_bound_;
}
base::TimeDelta TimeDeltaInterpolator::StopInterpolating() {
DCHECK(interpolating_);
lower_bound_ = GetInterpolatedTime();
interpolating_ = false;
return lower_bound_;
}
void TimeDeltaInterpolator::SetPlaybackRate(double playback_rate) {
lower_bound_ = GetInterpolatedTime();
reference_ = tick_clock_->NowTicks();
playback_rate_ = playback_rate;
}
void TimeDeltaInterpolator::SetBounds(base::TimeDelta lower_bound,
base::TimeDelta upper_bound,
base::TimeTicks capture_time) {
DCHECK(lower_bound <= upper_bound);
DCHECK(lower_bound != kNoTimestamp);
lower_bound_ = std::max(base::TimeDelta(), lower_bound);
upper_bound_ = std::max(base::TimeDelta(), upper_bound);
reference_ = capture_time;
}
void TimeDeltaInterpolator::SetUpperBound(base::TimeDelta upper_bound) {
DCHECK(upper_bound != kNoTimestamp);
lower_bound_ = GetInterpolatedTime();
reference_ = tick_clock_->NowTicks();
upper_bound_ = upper_bound;
}
base::TimeDelta TimeDeltaInterpolator::GetInterpolatedTime() {
if (!interpolating_)
return lower_bound_;
int64_t now_us = (tick_clock_->NowTicks() - reference_).InMicroseconds();
now_us = static_cast<int64_t>(now_us * playback_rate_);
base::TimeDelta interpolated_time =
lower_bound_ + base::TimeDelta::FromMicroseconds(now_us);
if (upper_bound_ == kNoTimestamp)
return interpolated_time;
return std::min(interpolated_time, upper_bound_);
}
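// Editorial note (not part of the original file): a typical caller seeds the
// interpolator with SetBounds(), starts it with StartInterpolating(), and then
// polls GetInterpolatedTime(), which returns
//   lower_bound + (NowTicks() - reference) * playback_rate
// clamped to upper_bound when one has been supplied. StopInterpolating() folds
// the interpolated value back into lower_bound.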
} // namespace media
| null | null | null | null | 49,682 |
24,546 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 24,546 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
// Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
//
// ChangeLog:
// 2016-07-22 - Initial commit and adaption to use PagedArray.
// --Samuel Huang <huangs@chromium.org>
#include "courgette/third_party/divsufsort/divsufsort_private.h"
#define TR_INSERTIONSORT_THRESHOLD (8)
#define TR_STACKSIZE (64)
#define STACK_PUSH5(_a, _b, _c, _d, _e)\
do {\
assert(ssize < STACK_SIZE);\
stack[ssize].a = (_a), stack[ssize].b = (_b),\
stack[ssize].c = (_c), stack[ssize].d = (_d), stack[ssize++].e = (_e);\
} while(0)
#define STACK_POP5(_a, _b, _c, _d, _e)\
do {\
assert(0 <= ssize);\
if(ssize == 0) { return; }\
(_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
(_c) = stack[ssize].c, (_d) = stack[ssize].d, (_e) = stack[ssize].e;\
} while(0)
namespace divsuf {
namespace {
/*- Private Functions -*/
const saint_t lg_table[256]= {
-1,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
};
inline
saint_t
tr_ilg(saidx_t n) {
return (n & 0xffff0000) ?
((n & 0xff000000) ?
24 + lg_table[(n >> 24) & 0xff] :
16 + lg_table[(n >> 16) & 0xff]) :
((n & 0x0000ff00) ?
8 + lg_table[(n >> 8) & 0xff] :
0 + lg_table[(n >> 0) & 0xff]);
}
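/* Editorial note (not in the original divsufsort sources): tr_ilg(n) is
   floor(log2(n)) computed with the 256-entry lookup table above, e.g.
   tr_ilg(1) == 0, tr_ilg(2) == 1, tr_ilg(255) == 7, tr_ilg(256) == 8. */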
/*---------------------------------------------------------------------------*/
/* Simple insertionsort for small size groups. */
void
tr_insertionsort(const_saidx_it ISAd, saidx_it first, saidx_it last) {
saidx_it a, b;
saidx_t t, r;
for(a = first + 1; a < last; ++a) {
for(t = *a, b = a - 1; 0 > (r = ISAd[t] - ISAd[*b]);) {
do { *(b + 1) = *b; } while((first <= --b) && (*b < 0));
if(b < first) { break; }
}
if(r == 0) { *b = ~*b; }
*(b + 1) = t;
}
}
/*---------------------------------------------------------------------------*/
inline
void
tr_fixdown(const_saidx_it ISAd, saidx_it SA, saidx_t i, saidx_t size) {
saidx_t j, k;
saidx_t v;
saidx_t c, d, e;
for(v = SA[i], c = ISAd[v]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {
d = ISAd[SA[k = j++]];
if(d < (e = ISAd[SA[j]])) { k = j; d = e; }
if(d <= c) { break; }
}
SA[i] = v;
}
/* Simple top-down heapsort. */
void
tr_heapsort(const_saidx_it ISAd, saidx_it SA, saidx_t size) {
saidx_t i, m;
saidx_t t;
m = size;
if((size % 2) == 0) {
m--;
if(ISAd[SA[m / 2]] < ISAd[SA[m]]) { SWAP(SA[m], SA[m / 2]); }
}
for(i = m / 2 - 1; 0 <= i; --i) { tr_fixdown(ISAd, SA, i, m); }
if((size % 2) == 0) { SWAP(SA[0], SA[m]); tr_fixdown(ISAd, SA, 0, m); }
for(i = m - 1; 0 < i; --i) {
t = SA[0], SA[0] = SA[i];
tr_fixdown(ISAd, SA, 0, i);
SA[i] = t;
}
}
/*---------------------------------------------------------------------------*/
/* Returns the median of three elements. */
inline
saidx_it
tr_median3(const_saidx_it ISAd, saidx_it v1, saidx_it v2, saidx_it v3) {
saidx_it t;
if(ISAd[*v1] > ISAd[*v2]) { SWAP(v1, v2); }
if(ISAd[*v2] > ISAd[*v3]) {
if(ISAd[*v1] > ISAd[*v3]) { return v1; }
else { return v3; }
}
return v2;
}
/* Returns the median of five elements. */
inline
saidx_it
tr_median5(const_saidx_it ISAd,
saidx_it v1, saidx_it v2, saidx_it v3, saidx_it v4, saidx_it v5) {
saidx_it t;
if(ISAd[*v2] > ISAd[*v3]) { SWAP(v2, v3); }
if(ISAd[*v4] > ISAd[*v5]) { SWAP(v4, v5); }
if(ISAd[*v2] > ISAd[*v4]) { SWAP(v2, v4); SWAP(v3, v5); }
if(ISAd[*v1] > ISAd[*v3]) { SWAP(v1, v3); }
if(ISAd[*v1] > ISAd[*v4]) { SWAP(v1, v4); SWAP(v3, v5); }
if(ISAd[*v3] > ISAd[*v4]) { return v4; }
return v3;
}
/* Returns the pivot element. */
inline
saidx_it
tr_pivot(const_saidx_it ISAd, saidx_it first, saidx_it last) {
saidx_it middle;
saidx_t t;
t = last - first;
middle = first + t / 2;
if(t <= 512) {
if(t <= 32) {
return tr_median3(ISAd, first, middle, last - 1);
} else {
t >>= 2;
return tr_median5(ISAd, first, first + t, middle, last - 1 - t, last - 1);
}
}
t >>= 3;
first = tr_median3(ISAd, first, first + t, first + (t << 1));
middle = tr_median3(ISAd, middle - t, middle, middle + t);
last = tr_median3(ISAd, last - 1 - (t << 1), last - 1 - t, last - 1);
return tr_median3(ISAd, first, middle, last);
}
/*---------------------------------------------------------------------------*/
typedef struct _trbudget_t trbudget_t;
struct _trbudget_t {
saidx_t chance;
saidx_t remain;
saidx_t incval;
saidx_t count;
};
inline
void
trbudget_init(trbudget_t *budget, saidx_t chance, saidx_t incval) {
budget->chance = chance;
budget->remain = budget->incval = incval;
}
inline
saint_t
trbudget_check(trbudget_t *budget, saidx_t size) {
if(size <= budget->remain) { budget->remain -= size; return 1; }
if(budget->chance == 0) { budget->count += size; return 0; }
budget->remain += budget->incval - size;
budget->chance -= 1;
return 1;
}
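/* Editorial note (not in the original sources): trbudget_check() rations the
   work done per pass. Each call spends `size` out of `remain`; when `remain`
   runs dry a `chance` is consumed and `remain` is refilled with `incval`.
   With no chances left the excess is tallied in `count` and 0 is returned,
   so the caller abandons that partition until a later pass. */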
/*---------------------------------------------------------------------------*/
inline
void
tr_partition(const_saidx_it ISAd,
saidx_it first, saidx_it middle, saidx_it last,
saidx_it* pa, saidx_it* pb, saidx_t v) {
saidx_it a, b, c, d, e, f;
saidx_t t, s;
saidx_t x = 0;
for(b = middle - 1; (++b < last) && ((x = ISAd[*b]) == v);) { }
if(((a = b) < last) && (x < v)) {
for(; (++b < last) && ((x = ISAd[*b]) <= v);) {
if(x == v) { SWAP(*b, *a); ++a; }
}
}
for(c = last; (b < --c) && ((x = ISAd[*c]) == v);) { }
if((b < (d = c)) && (x > v)) {
for(; (b < --c) && ((x = ISAd[*c]) >= v);) {
if(x == v) { SWAP(*c, *d); --d; }
}
}
for(; b < c;) {
SWAP(*b, *c);
for(; (++b < c) && ((x = ISAd[*b]) <= v);) {
if(x == v) { SWAP(*b, *a); ++a; }
}
for(; (b < --c) && ((x = ISAd[*c]) >= v);) {
if(x == v) { SWAP(*c, *d); --d; }
}
}
if(a <= d) {
c = b - 1;
if((s = a - first) > (t = b - a)) { s = t; }
for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
if((s = d - c) > (t = last - d - 1)) { s = t; }
for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
first += (b - a), last -= (d - c);
}
*pa = first, *pb = last;
}
void
tr_copy(saidx_it ISA, const_saidx_it SA,
saidx_it first, saidx_it a, saidx_it b, saidx_it last,
saidx_t depth) {
/* sort suffixes of middle partition
by using sorted order of suffixes of left and right partition. */
saidx_it c, d, e;
saidx_t s, v;
v = b - SA - 1;
for(c = first, d = a - 1; c <= d; ++c) {
if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
*++d = s;
ISA[s] = d - SA;
}
}
for(c = last - 1, e = d + 1, d = b; e < d; --c) {
if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
*--d = s;
ISA[s] = d - SA;
}
}
}
void
tr_partialcopy(saidx_it ISA, const_saidx_it SA,
saidx_it first, saidx_it a, saidx_it b, saidx_it last,
saidx_t depth) {
saidx_it c, d, e;
saidx_t s, v;
saidx_t rank, lastrank, newrank = -1;
v = b - SA - 1;
lastrank = -1;
for(c = first, d = a - 1; c <= d; ++c) {
if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
*++d = s;
rank = ISA[s + depth];
if(lastrank != rank) { lastrank = rank; newrank = d - SA; }
ISA[s] = newrank;
}
}
lastrank = -1;
for(e = d; first <= e; --e) {
rank = ISA[*e];
if(lastrank != rank) { lastrank = rank; newrank = e - SA; }
if(newrank != rank) { ISA[*e] = newrank; }
}
lastrank = -1;
for(c = last - 1, e = d + 1, d = b; e < d; --c) {
if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
*--d = s;
rank = ISA[s + depth];
if(lastrank != rank) { lastrank = rank; newrank = d - SA; }
ISA[s] = newrank;
}
}
}
void
tr_introsort(saidx_it ISA, const_saidx_it ISAd,
saidx_it SA, saidx_it first, saidx_it last,
trbudget_t *budget) {
#define STACK_SIZE TR_STACKSIZE
struct { const_saidx_it a; saidx_it b, c; saint_t d, e; }stack[STACK_SIZE];
saidx_it a, b, c;
saidx_t t;
saidx_t v, x = 0;
saidx_t incr = ISAd - ISA;
saint_t limit, next;
saint_t ssize, trlink = -1;
for(ssize = 0, limit = tr_ilg(last - first);;) {
if(limit < 0) {
if(limit == -1) {
/* tandem repeat partition */
tr_partition(ISAd - incr, first, first, last, &a, &b, last - SA - 1);
/* update ranks */
if(a < last) {
for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
}
if(b < last) {
for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; }
}
/* push */
if(1 < (b - a)) {
STACK_PUSH5(NULL, a, b, 0, 0);
STACK_PUSH5(ISAd - incr, first, last, -2, trlink);
trlink = ssize - 2;
}
if((a - first) <= (last - b)) {
if(1 < (a - first)) {
STACK_PUSH5(ISAd, b, last, tr_ilg(last - b), trlink);
last = a, limit = tr_ilg(a - first);
} else if(1 < (last - b)) {
first = b, limit = tr_ilg(last - b);
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
} else {
if(1 < (last - b)) {
STACK_PUSH5(ISAd, first, a, tr_ilg(a - first), trlink);
first = b, limit = tr_ilg(last - b);
} else if(1 < (a - first)) {
last = a, limit = tr_ilg(a - first);
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
} else if(limit == -2) {
/* tandem repeat copy */
a = stack[--ssize].b, b = stack[ssize].c;
if(stack[ssize].d == 0) {
tr_copy(ISA, SA, first, a, b, last, ISAd - ISA);
} else {
if(0 <= trlink) { stack[trlink].d = -1; }
tr_partialcopy(ISA, SA, first, a, b, last, ISAd - ISA);
}
STACK_POP5(ISAd, first, last, limit, trlink);
} else {
/* sorted partition */
if(0 <= *first) {
a = first;
do { ISA[*a] = a - SA; } while((++a < last) && (0 <= *a));
first = a;
}
if(first < last) {
a = first; do { *a = ~*a; } while(*++a < 0);
next = (ISA[*a] != ISAd[*a]) ? tr_ilg(a - first + 1) : -1;
if(++a < last) { for(b = first, v = a - SA - 1; b < a; ++b) { ISA[*b] = v; } }
/* push */
if(trbudget_check(budget, a - first)) {
if((a - first) <= (last - a)) {
STACK_PUSH5(ISAd, a, last, -3, trlink);
ISAd += incr, last = a, limit = next;
} else {
if(1 < (last - a)) {
STACK_PUSH5(ISAd + incr, first, a, next, trlink);
first = a, limit = -3;
} else {
ISAd += incr, last = a, limit = next;
}
}
} else {
if(0 <= trlink) { stack[trlink].d = -1; }
if(1 < (last - a)) {
first = a, limit = -3;
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
continue;
}
if((last - first) <= TR_INSERTIONSORT_THRESHOLD) {
tr_insertionsort(ISAd, first, last);
limit = -3;
continue;
}
if(limit-- == 0) {
tr_heapsort(ISAd, first, last - first);
for(a = last - 1; first < a; a = b) {
for(x = ISAd[*a], b = a - 1; (first <= b) && (ISAd[*b] == x); --b) { *b = ~*b; }
}
limit = -3;
continue;
}
/* choose pivot */
a = tr_pivot(ISAd, first, last);
SWAP(*first, *a);
v = ISAd[*first];
/* partition */
tr_partition(ISAd, first, first + 1, last, &a, &b, v);
if((last - first) != (b - a)) {
next = (ISA[*a] != v) ? tr_ilg(b - a) : -1;
/* update ranks */
for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } }
/* push */
if((1 < (b - a)) && (trbudget_check(budget, b - a))) {
if((a - first) <= (last - b)) {
if((last - b) <= (b - a)) {
if(1 < (a - first)) {
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
STACK_PUSH5(ISAd, b, last, limit, trlink);
last = a;
} else if(1 < (last - b)) {
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
first = b;
} else {
ISAd += incr, first = a, last = b, limit = next;
}
} else if((a - first) <= (b - a)) {
if(1 < (a - first)) {
STACK_PUSH5(ISAd, b, last, limit, trlink);
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
last = a;
} else {
STACK_PUSH5(ISAd, b, last, limit, trlink);
ISAd += incr, first = a, last = b, limit = next;
}
} else {
STACK_PUSH5(ISAd, b, last, limit, trlink);
STACK_PUSH5(ISAd, first, a, limit, trlink);
ISAd += incr, first = a, last = b, limit = next;
}
} else {
if((a - first) <= (b - a)) {
if(1 < (last - b)) {
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
STACK_PUSH5(ISAd, first, a, limit, trlink);
first = b;
} else if(1 < (a - first)) {
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
last = a;
} else {
ISAd += incr, first = a, last = b, limit = next;
}
} else if((last - b) <= (b - a)) {
if(1 < (last - b)) {
STACK_PUSH5(ISAd, first, a, limit, trlink);
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
first = b;
} else {
STACK_PUSH5(ISAd, first, a, limit, trlink);
ISAd += incr, first = a, last = b, limit = next;
}
} else {
STACK_PUSH5(ISAd, first, a, limit, trlink);
STACK_PUSH5(ISAd, b, last, limit, trlink);
ISAd += incr, first = a, last = b, limit = next;
}
}
} else {
if((1 < (b - a)) && (0 <= trlink)) { stack[trlink].d = -1; }
if((a - first) <= (last - b)) {
if(1 < (a - first)) {
STACK_PUSH5(ISAd, b, last, limit, trlink);
last = a;
} else if(1 < (last - b)) {
first = b;
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
} else {
if(1 < (last - b)) {
STACK_PUSH5(ISAd, first, a, limit, trlink);
first = b;
} else if(1 < (a - first)) {
last = a;
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
}
} else {
if(trbudget_check(budget, last - first)) {
limit = tr_ilg(last - first), ISAd += incr;
} else {
if(0 <= trlink) { stack[trlink].d = -1; }
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
}
#undef STACK_SIZE
}
} // namespace
/*---------------------------------------------------------------------------*/
/*- Function -*/
/* Tandem repeat sort */
void
trsort(saidx_it ISA, saidx_it SA, saidx_t n, saidx_t depth) {
saidx_it ISAd;
saidx_it first, last;
trbudget_t budget;
saidx_t t, skip, unsorted;
trbudget_init(&budget, tr_ilg(n) * 2 / 3, n);
/* trbudget_init(&budget, tr_ilg(n) * 3 / 4, n); */
for(ISAd = ISA + depth; -n < *SA; ISAd += ISAd - ISA) {
first = SA;
skip = 0;
unsorted = 0;
do {
if((t = *first) < 0) { first -= t; skip += t; }
else {
if(skip != 0) { *(first + skip) = skip; skip = 0; }
last = SA + ISA[t] + 1;
if(1 < (last - first)) {
budget.count = 0;
tr_introsort(ISA, ISAd, SA, first, last, &budget);
if(budget.count != 0) { unsorted += budget.count; }
else { skip = first - last; }
} else if((last - first) == 1) {
skip = -1;
}
first = last;
}
} while(first < (SA + n));
if(skip != 0) { *(first + skip) = skip; }
if(unsorted == 0) { break; }
}
}
} // namespace divsuf
| null | null | null | null | 21,409 |
21,381 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 21,381 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
// Copyright (c) 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_RTC_RTP_SENDER_H_
#define CONTENT_RENDERER_MEDIA_WEBRTC_RTC_RTP_SENDER_H_
#include <memory>
#include <vector>
#include "base/callback.h"
#include "content/common/content_export.h"
#include "content/renderer/media/webrtc/webrtc_media_stream_adapter_map.h"
#include "content/renderer/media/webrtc/webrtc_media_stream_track_adapter_map.h"
#include "third_party/blink/public/platform/web_media_stream_track.h"
#include "third_party/blink/public/platform/web_rtc_rtp_sender.h"
#include "third_party/blink/public/platform/web_rtc_stats.h"
#include "third_party/webrtc/api/peerconnectioninterface.h"
#include "third_party/webrtc/api/rtpsenderinterface.h"
#include "third_party/webrtc/rtc_base/scoped_ref_ptr.h"
namespace content {
// Used to surface |webrtc::RtpSenderInterface| to blink. Multiple
// |RTCRtpSender|s could reference the same webrtc sender; |id| is the value
// of the pointer to the webrtc sender.
class CONTENT_EXPORT RTCRtpSender : public blink::WebRTCRtpSender {
public:
static uintptr_t getId(const webrtc::RtpSenderInterface* webrtc_sender);
RTCRtpSender(
scoped_refptr<webrtc::PeerConnectionInterface> native_peer_connection,
scoped_refptr<base::SingleThreadTaskRunner> main_thread,
scoped_refptr<base::SingleThreadTaskRunner> signaling_thread,
scoped_refptr<WebRtcMediaStreamAdapterMap> stream_map,
rtc::scoped_refptr<webrtc::RtpSenderInterface> webrtc_sender,
blink::WebMediaStreamTrack web_track,
std::vector<blink::WebMediaStream> web_streams);
RTCRtpSender(
scoped_refptr<webrtc::PeerConnectionInterface> native_peer_connection,
scoped_refptr<base::SingleThreadTaskRunner> main_thread,
scoped_refptr<base::SingleThreadTaskRunner> signaling_thread,
scoped_refptr<WebRtcMediaStreamAdapterMap> stream_map,
rtc::scoped_refptr<webrtc::RtpSenderInterface> webrtc_sender,
std::unique_ptr<WebRtcMediaStreamTrackAdapterMap::AdapterRef> track_ref,
std::vector<std::unique_ptr<WebRtcMediaStreamAdapterMap::AdapterRef>>
stream_refs);
RTCRtpSender(const RTCRtpSender& other);
~RTCRtpSender() override;
RTCRtpSender& operator=(const RTCRtpSender& other);
// Creates a shallow copy of the sender, representing the same underlying
// webrtc sender as the original.
// TODO(hbos): Remove in favor of constructor. https://crbug.com/790007
std::unique_ptr<RTCRtpSender> ShallowCopy() const;
// blink::WebRTCRtpSender.
uintptr_t Id() const override;
blink::WebMediaStreamTrack Track() const override;
void ReplaceTrack(blink::WebMediaStreamTrack with_track,
blink::WebRTCVoidRequest request) override;
std::unique_ptr<blink::WebRTCDTMFSenderHandler> GetDtmfSender()
const override;
std::unique_ptr<blink::WebRTCRtpParameters> GetParameters() const override;
void GetStats(std::unique_ptr<blink::WebRTCStatsReportCallback>) override;
webrtc::RtpSenderInterface* webrtc_sender() const;
const webrtc::MediaStreamTrackInterface* webrtc_track() const;
std::vector<std::unique_ptr<WebRtcMediaStreamAdapterMap::AdapterRef>>
stream_refs() const;
// The ReplaceTrack() that takes a blink::WebRTCVoidRequest is implemented on
// top of this, which returns the result in a callback instead. Allows doing
// ReplaceTrack() without having a blink::WebRTCVoidRequest, which can only be
// constructed inside of blink.
void ReplaceTrack(blink::WebMediaStreamTrack with_track,
base::OnceCallback<void(bool)> callback);
bool RemoveFromPeerConnection(webrtc::PeerConnectionInterface* pc);
private:
class RTCRtpSenderInternal;
scoped_refptr<RTCRtpSenderInternal> internal_;
};
} // namespace content
#endif // CONTENT_RENDERER_MEDIA_WEBRTC_RTC_RTP_SENDER_H_
| null | null | null | null | 18,244 |
44,936 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 44,936 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ppapi/shared_impl/file_ref_util.h"
#include <stddef.h>
#include "base/files/file_path.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "build/build_config.h"
namespace ppapi {
std::string GetNameForInternalFilePath(const std::string& path) {
if (path == "/")
return path;
size_t pos = path.rfind('/');
CHECK(pos != std::string::npos);
return path.substr(pos + 1);
}
std::string GetNameForExternalFilePath(const base::FilePath& path) {
const base::FilePath::StringType& file_path = path.value();
size_t pos = file_path.rfind(base::FilePath::kSeparators[0]);
CHECK(pos != base::FilePath::StringType::npos);
#if defined(OS_WIN)
return base::WideToUTF8(file_path.substr(pos + 1));
#elif defined(OS_POSIX)
return file_path.substr(pos + 1);
#else
#error "Unsupported platform."
#endif
}
bool IsValidInternalPath(const std::string& path) {
  // We check that:
  //  - the path starts with '/',
  //  - the path contains only valid UTF-8 characters, and
  //  - it does not FilePath::ReferencesParent().
if (path.empty() || !base::IsStringUTF8(path) || path[0] != '/')
return false;
base::FilePath file_path = base::FilePath::FromUTF8Unsafe(path);
if (file_path.ReferencesParent())
return false;
return true;
}
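// For illustration (editorial note): "/a/b" passes the checks above, while
// "a/b" (no leading slash) and "/a/../b" (references a parent) are rejected.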
bool IsValidExternalPath(const base::FilePath& path) {
return !path.empty() && !path.ReferencesParent();
}
void NormalizeInternalPath(std::string* path) {
if (path->size() > 1 && path->back() == '/')
path->erase(path->size() - 1, 1);
}
} // namespace ppapi
| null | null | null | null | 41,799 |
654 | null |
train_val
|
a6802e21d824e786d1e2a8440cf749a6e1a8d95f
| 160,782 |
ImageMagick
| 0 |
https://github.com/ImageMagick/ImageMagick
|
2017-07-18 18:28:29-04:00
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% BBBB GGGG RRRR %
% B B G R R %
% BBBB G GG RRRR %
% B B G G R R %
% BBBB GGG R R %
% %
% %
% Read/Write Raw BGR Image Format %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/module.h"
#include "MagickCore/utility.h"
/*
Forward declarations.
*/
static MagickBooleanType
WriteBGRImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d B G R I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadBGRImage() reads an image of raw BGR or BGRA samples and returns
% it. It allocates the memory necessary for the new Image structure and
% returns a pointer to the new image.
%
% The format of the ReadBGRImage method is:
%
% Image *ReadBGRImage(const ImageInfo *image_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
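/*
  Editorial note (not part of the original coder): raw BGR frames carry no
  header, so the caller must supply the geometry (and normally the sample
  depth) up front or ReadBGRImage() fails with "MustSpecifyImageSize", e.g.

    magick -size 640x480 -depth 8 image.bgr image.png

  The command above is only an illustration of the usual ImageMagick CLI
  options; any API path that provides the image geometry up front works
  equally well.
*/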
static Image *ReadBGRImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
const unsigned char
*pixels;
Image
*canvas_image,
*image;
MagickBooleanType
status;
MagickOffsetType
scene;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
register ssize_t
i;
size_t
length;
ssize_t
count,
y;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
if ((image->columns == 0) || (image->rows == 0))
ThrowReaderException(OptionError,"MustSpecifyImageSize");
if (image_info->interlace != PartitionInterlace)
{
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
if (DiscardBlobBytes(image,image->offset) == MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
/*
Create virtual canvas to support cropping (i.e. image.rgb[100x100+10+20]).
*/
  canvas_image=CloneImage(image,image->extract_info.width,1,MagickFalse,
    exception);
  if (canvas_image == (Image *) NULL)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  (void) SetImageVirtualPixelMethod(canvas_image,BlackVirtualPixelMethod,
    exception);
quantum_info=AcquireQuantumInfo(image_info,canvas_image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
quantum_type=BGRQuantum;
if (LocaleCompare(image_info->magick,"BGRA") == 0)
{
quantum_type=BGRAQuantum;
image->alpha_trait=BlendPixelTrait;
canvas_image->alpha_trait=BlendPixelTrait;
}
if (LocaleCompare(image_info->magick,"BGRO") == 0)
{
quantum_type=BGROQuantum;
image->alpha_trait=BlendPixelTrait;
canvas_image->alpha_trait=BlendPixelTrait;
}
pixels=(const unsigned char *) NULL;
if (image_info->number_scenes != 0)
while (image->scene < image_info->scene)
{
/*
Skip to next image.
*/
image->scene++;
length=GetQuantumExtent(canvas_image,quantum_info,quantum_type);
for (y=0; y < (ssize_t) image->rows; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
break;
}
}
count=0;
length=0;
scene=0;
do
{
/*
Read pixels to virtual canvas image then push to image.
*/
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
    status=SetImageExtent(image,image->columns,image->rows,exception);
    if (status == MagickFalse)
      {
        quantum_info=DestroyQuantumInfo(quantum_info);
        canvas_image=DestroyImage(canvas_image);
        image=DestroyImageList(image);
        return((Image *) NULL);
      }
switch (image_info->interlace)
{
case NoInterlace:
default:
{
/*
No interlacing: BGRBGRBGRBGRBGRBGR...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,quantum_type);
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,quantum_type,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=QueueAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,GetPixelRed(canvas_image,p),q);
SetPixelGreen(image,GetPixelGreen(canvas_image,p),q);
SetPixelBlue(image,GetPixelBlue(canvas_image,p),q);
SetPixelAlpha(image,OpaqueAlpha,q);
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,GetPixelAlpha(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
break;
}
case LineInterlace:
{
static QuantumType
quantum_types[4] =
{
BlueQuantum,
GreenQuantum,
RedQuantum,
AlphaQuantum
};
/*
          Line interlacing: BBB...GGG...RRR...BBB...GGG...RRR...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,RedQuantum);
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
for (i=0; i < (ssize_t) (image->alpha_trait != UndefinedPixelTrait ? 4 : 3); i++)
{
quantum_type=quantum_types[i];
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,quantum_type,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
switch (quantum_type)
{
case RedQuantum:
{
SetPixelRed(image,GetPixelRed(canvas_image,p),q);
break;
}
case GreenQuantum:
{
SetPixelGreen(image,GetPixelGreen(canvas_image,p),q);
break;
}
case BlueQuantum:
{
SetPixelBlue(image,GetPixelBlue(canvas_image,p),q);
break;
}
case OpacityQuantum:
{
SetPixelAlpha(image,GetPixelAlpha(canvas_image,p),q);
break;
}
case AlphaQuantum:
{
SetPixelAlpha(image,GetPixelAlpha(canvas_image,p),q);
break;
}
default:
break;
}
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case PlaneInterlace:
{
/*
Plane interlacing: RRRRRR...GGGGGG...BBBBBB...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,RedQuantum);
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,RedQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,GetPixelRed(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,1,6);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,GreenQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGreen(image,GetPixelGreen(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,2,6);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,BlueQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(image,GetPixelBlue(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,3,6);
if (status == MagickFalse)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,4,6);
if (status == MagickFalse)
break;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,AlphaQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,
canvas_image->extract_info.x,0,canvas_image->columns,1,
exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelAlpha(image,GetPixelAlpha(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,5,6);
if (status == MagickFalse)
break;
}
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,6,6);
if (status == MagickFalse)
break;
}
break;
}
case PartitionInterlace:
{
/*
Partition interlacing: BBBBBB..., GGGGGG..., RRRRRR...
*/
AppendImageFormat("B",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
if (DiscardBlobBytes(image,image->offset) == MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
length=GetQuantumExtent(canvas_image,quantum_info,BlueQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,BlueQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
              SetPixelBlue(image,GetPixelBlue(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,1,5);
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
AppendImageFormat("G",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,GreenQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,GreenQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGreen(image,GetPixelGreen(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,2,5);
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
AppendImageFormat("R",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,RedQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,RedQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
              SetPixelRed(image,GetPixelRed(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,3,5);
if (status == MagickFalse)
break;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
(void) CloseBlob(image);
AppendImageFormat("A",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,AlphaQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
            length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
              quantum_info,AlphaQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,
0,canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelAlpha(image,GetPixelAlpha(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,4,5);
if (status == MagickFalse)
break;
}
}
(void) CloseBlob(image);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,5,5);
if (status == MagickFalse)
break;
}
break;
}
}
SetQuantumImageType(image,quantum_type);
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if (count == (ssize_t) length)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
scene++;
} while (count == (ssize_t) length);
quantum_info=DestroyQuantumInfo(quantum_info);
canvas_image=DestroyImage(canvas_image);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r B G R I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterBGRImage() adds attributes for the BGR image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterBGRImage method is:
%
% size_t RegisterBGRImage(void)
%
*/
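/*
  Once these entries are registered, the coder can also be exercised from the
  ImageMagick command line.  A hedged example (the file names and geometry are
  assumptions; raw streams have no header, so -size and -depth are required on
  input):

    magick -size 640x480 -depth 8 image.bgr image.png
    magick image.png -depth 8 bgra:image.bgra
*/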
ModuleExport size_t RegisterBGRImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("BGR","BGR","Raw blue, green, and red samples");
entry->decoder=(DecodeImageHandler *) ReadBGRImage;
entry->encoder=(EncodeImageHandler *) WriteBGRImage;
entry->flags|=CoderRawSupportFlag;
entry->flags|=CoderEndianSupportFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("BGR","BGRA",
"Raw blue, green, red, and alpha samples");
entry->decoder=(DecodeImageHandler *) ReadBGRImage;
entry->encoder=(EncodeImageHandler *) WriteBGRImage;
entry->flags|=CoderRawSupportFlag;
entry->flags|=CoderEndianSupportFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("BGR","BGRO",
"Raw blue, green, red, and opacity samples");
entry->decoder=(DecodeImageHandler *) ReadBGRImage;
entry->encoder=(EncodeImageHandler *) WriteBGRImage;
entry->flags|=CoderRawSupportFlag;
entry->flags|=CoderEndianSupportFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r B G R I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterBGRImage() removes format registrations made by the BGR module
% from the list of supported formats.
%
% The format of the UnregisterBGRImage method is:
%
% UnregisterBGRImage(void)
%
*/
ModuleExport void UnregisterBGRImage(void)
{
  (void) UnregisterMagickInfo("BGRO");
  (void) UnregisterMagickInfo("BGRA");
  (void) UnregisterMagickInfo("BGR");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e B G R I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteBGRImage() writes an image to a file in the BGR or BGRA
% rasterfile format.
%
% The format of the WriteBGRImage method is:
%
% MagickBooleanType WriteBGRImage(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
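/*
  A minimal usage sketch, not part of this coder: writing an existing Image as
  raw BGR samples through the public MagickCore API.  The output filename and
  the interlace choice are assumptions made only for illustration:

    ImageInfo
      *image_info;

    image_info=AcquireImageInfo();
    image_info->interlace=NoInterlace;
    (void) CopyMagickString(image->filename,"image.bgr",MagickPathExtent);
    (void) WriteImage(image_info,image,exception);
    image_info=DestroyImageInfo(image_info);
*/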
static MagickBooleanType WriteBGRImage(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
MagickBooleanType
status;
MagickOffsetType
scene;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
size_t
length;
ssize_t
count,
y;
unsigned char
*pixels;
/*
Allocate memory for pixels.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image_info->interlace != PartitionInterlace)
{
/*
Open output image file.
*/
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
}
quantum_type=BGRQuantum;
if (LocaleCompare(image_info->magick,"BGRA") == 0)
{
quantum_type=BGRAQuantum;
image->alpha_trait=BlendPixelTrait;
}
scene=0;
do
{
/*
Convert MIFF to BGR raster pixels.
*/
(void) TransformImageColorspace(image,sRGBColorspace,exception);
if ((LocaleCompare(image_info->magick,"BGRA") == 0) &&
(image->alpha_trait == UndefinedPixelTrait))
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
switch (image_info->interlace)
{
case NoInterlace:
default:
{
/*
No interlacing: BGRBGRBGRBGRBGRBGR...
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
count=WriteBlob(image,length,pixels);
if (count != (ssize_t) length)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case LineInterlace:
{
/*
          Line interlacing: BBB...GGG...RRR...BBB...GGG...RRR...
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
BlueQuantum,pixels,exception);
count=WriteBlob(image,length,pixels);
if (count != (ssize_t) length)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
GreenQuantum,pixels,exception);
count=WriteBlob(image,length,pixels);
if (count != (ssize_t) length)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
RedQuantum,pixels,exception);
count=WriteBlob(image,length,pixels);
if (count != (ssize_t) length)
break;
if (quantum_type == BGRAQuantum)
{
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
AlphaQuantum,pixels,exception);
count=WriteBlob(image,length,pixels);
if (count != (ssize_t) length)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case PlaneInterlace:
{
/*
Plane interlacing: RRRRRR...GGGGGG...BBBBBB...
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
RedQuantum,pixels,exception);
count=WriteBlob(image,length,pixels);
if (count != (ssize_t) length)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,1,6);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
GreenQuantum,pixels,exception);
count=WriteBlob(image,length,pixels);
if (count != (ssize_t) length)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,2,6);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
BlueQuantum,pixels,exception);
count=WriteBlob(image,length,pixels);
if (count != (ssize_t) length)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,3,6);
if (status == MagickFalse)
break;
}
if (quantum_type == BGRAQuantum)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
AlphaQuantum,pixels,exception);
count=WriteBlob(image,length,pixels);
if (count != (ssize_t) length)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,5,6);
if (status == MagickFalse)
break;
}
}
if (image_info->interlace == PartitionInterlace)
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,6,6);
if (status == MagickFalse)
break;
}
break;
}
case PartitionInterlace:
{
/*
Partition interlacing: BBBBBB..., GGGGGG..., RRRRRR...
*/
AppendImageFormat("B",image->filename);
status=OpenBlob(image_info,image,scene == 0 ? WriteBinaryBlobMode :
AppendBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
BlueQuantum,pixels,exception);
count=WriteBlob(image,length,pixels);
if (count != (ssize_t) length)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,1,6);
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
AppendImageFormat("G",image->filename);
status=OpenBlob(image_info,image,scene == 0 ? WriteBinaryBlobMode :
AppendBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
GreenQuantum,pixels,exception);
count=WriteBlob(image,length,pixels);
if (count != (ssize_t) length)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,2,6);
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
AppendImageFormat("R",image->filename);
status=OpenBlob(image_info,image,scene == 0 ? WriteBinaryBlobMode :
AppendBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
RedQuantum,pixels,exception);
count=WriteBlob(image,length,pixels);
if (count != (ssize_t) length)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,3,6);
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
if (quantum_type == BGRAQuantum)
{
(void) CloseBlob(image);
AppendImageFormat("A",image->filename);
status=OpenBlob(image_info,image,scene == 0 ? WriteBinaryBlobMode :
AppendBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
AlphaQuantum,pixels,exception);
count=WriteBlob(image,length,pixels);
if (count != (ssize_t) length)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,5,6);
if (status == MagickFalse)
break;
}
}
(void) CloseBlob(image);
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,6,6);
if (status == MagickFalse)
break;
}
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene++,
GetImageListLength(image));
if (status == MagickFalse)
break;
} while (image_info->adjoin != MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
| null | null | null | null | 73,075 |
16,725 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 16,725 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/translate/core/common/language_detection_logging_helper.h"
#include <string>
#include "components/sync/protocol/user_event_specifics.pb.h"
#include "components/translate/core/common/language_detection_details.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace translate {
// Tests that sync_pb::UserEventSpecifics is correctly built.
TEST(LanguageDetectionLoggingHelperTest, ConstructUserEventSpecifics) {
LanguageDetectionDetails details;
details.cld_language = "en";
details.is_cld_reliable = false;
details.adopted_language = "ja";
// Expected language detection.
sync_pb::UserEventSpecifics::LanguageDetection lang_detection;
auto* const lang = lang_detection.add_detected_languages();
lang->set_language_code(details.cld_language);
lang->set_is_reliable(details.is_cld_reliable);
lang_detection.set_adopted_language_code(details.adopted_language);
const int64_t navigation_id = 1000000000000000LL;
const std::unique_ptr<sync_pb::UserEventSpecifics> user_event =
ConstructLanguageDetectionEvent(navigation_id, details);
// Expect the navigation id is correctly set.
EXPECT_EQ(user_event->navigation_id(), navigation_id);
EXPECT_EQ(user_event->language_detection_event().SerializeAsString(),
lang_detection.SerializeAsString());
}
// Tests that sync_pb::UserEventSpecifics is correctly built.
// If adopted_language is the same as cld_language, we don't set it.
TEST(LanguageDetectionLoggingHelperTest, DontSetAdoptedLanguage) {
LanguageDetectionDetails details;
details.cld_language = "en";
details.is_cld_reliable = true;
details.adopted_language = "en";
// Expected language detection.
sync_pb::UserEventSpecifics::LanguageDetection lang_detection;
auto* const lang = lang_detection.add_detected_languages();
lang->set_language_code(details.cld_language);
lang->set_is_reliable(details.is_cld_reliable);
const std::unique_ptr<sync_pb::UserEventSpecifics> user_event =
ConstructLanguageDetectionEvent(100, details);
// Expect the navigation id is correctly set.
EXPECT_EQ(user_event->navigation_id(), 100);
EXPECT_EQ(user_event->language_detection_event().SerializeAsString(),
lang_detection.SerializeAsString());
}
} // namespace translate
| null | null | null | null | 13,588 |
867 | null |
train_val
|
1b0d3845b454eaaac0b2064c78926ca4d739a080
| 263,435 |
qemu
| 0 |
https://github.com/bonzini/qemu
|
2016-10-18 11:40:27+01:00
|
#include<stdio.h>
#include<assert.h>
int main()
{
int rd, rs, rt, dsp;
int result, resultdsp;
rs = 0x0;
rt = 0x12345678;
result = 0x12345678;
resultdsp = 0;
__asm
("shllv.ph %0, %2, %3\n\t"
"rddsp %1\n\t"
: "=r"(rd), "=r"(dsp)
: "r"(rt), "r"(rs)
);
dsp = (dsp >> 22) & 0x01;
assert(dsp == resultdsp);
assert(rd == result);
rs = 0x0B;
rt = 0x12345678;
result = 0xA000C000;
resultdsp = 1;
__asm
("shllv.ph %0, %2, %3\n\t"
"rddsp %1\n\t"
: "=r"(rd), "=r"(dsp)
: "r"(rt), "r"(rs)
);
dsp = (dsp >> 22) & 0x01;
assert(dsp == resultdsp);
assert(rd == result);
return 0;
}
| null | null | null | null | 121,559 |
37,032 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 37,032 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
/*
* Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010 Apple Inc. All rights
* reserved.
* Copyright (C) Research In Motion Limited 2011. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_WTF_TEXT_STRING_OPERATORS_H_
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_WTF_TEXT_STRING_OPERATORS_H_
#include "third_party/blink/renderer/platform/wtf/allocator.h"
#include "third_party/blink/renderer/platform/wtf/text/string_concatenate.h"
namespace WTF {
template <typename StringType1, typename StringType2>
class StringAppend final {
STACK_ALLOCATED();
public:
StringAppend(StringType1 string1, StringType2 string2);
operator String() const;
operator AtomicString() const;
unsigned length() const;
bool Is8Bit() const;
void WriteTo(LChar* destination) const;
void WriteTo(UChar* destination) const;
private:
const StringType1 string1_;
const StringType2 string2_;
};
template <typename StringType1, typename StringType2>
StringAppend<StringType1, StringType2>::StringAppend(StringType1 string1,
StringType2 string2)
: string1_(string1), string2_(string2) {}
template <typename StringType1, typename StringType2>
StringAppend<StringType1, StringType2>::operator String() const {
if (Is8Bit()) {
LChar* buffer;
scoped_refptr<StringImpl> result =
StringImpl::CreateUninitialized(length(), buffer);
WriteTo(buffer);
return result;
}
UChar* buffer;
scoped_refptr<StringImpl> result =
StringImpl::CreateUninitialized(length(), buffer);
WriteTo(buffer);
return result;
}
template <typename StringType1, typename StringType2>
StringAppend<StringType1, StringType2>::operator AtomicString() const {
return AtomicString(static_cast<String>(*this));
}
template <typename StringType1, typename StringType2>
bool StringAppend<StringType1, StringType2>::Is8Bit() const {
StringTypeAdapter<StringType1> adapter1(string1_);
StringTypeAdapter<StringType2> adapter2(string2_);
return adapter1.Is8Bit() && adapter2.Is8Bit();
}
template <typename StringType1, typename StringType2>
void StringAppend<StringType1, StringType2>::WriteTo(LChar* destination) const {
DCHECK(Is8Bit());
StringTypeAdapter<StringType1> adapter1(string1_);
StringTypeAdapter<StringType2> adapter2(string2_);
adapter1.WriteTo(destination);
adapter2.WriteTo(destination + adapter1.length());
}
template <typename StringType1, typename StringType2>
void StringAppend<StringType1, StringType2>::WriteTo(UChar* destination) const {
StringTypeAdapter<StringType1> adapter1(string1_);
StringTypeAdapter<StringType2> adapter2(string2_);
adapter1.WriteTo(destination);
adapter2.WriteTo(destination + adapter1.length());
}
template <typename StringType1, typename StringType2>
unsigned StringAppend<StringType1, StringType2>::length() const {
StringTypeAdapter<StringType1> adapter1(string1_);
StringTypeAdapter<StringType2> adapter2(string2_);
unsigned total = adapter1.length() + adapter2.length();
// Guard against overflow.
CHECK_GE(total, adapter1.length());
CHECK_GE(total, adapter2.length());
return total;
}
template <typename StringType1, typename StringType2>
class StringTypeAdapter<StringAppend<StringType1, StringType2>> {
STACK_ALLOCATED();
public:
StringTypeAdapter<StringAppend<StringType1, StringType2>>(
const StringAppend<StringType1, StringType2>& buffer)
: buffer_(buffer) {}
unsigned length() const { return buffer_.length(); }
bool Is8Bit() const { return buffer_.Is8Bit(); }
void WriteTo(LChar* destination) const { buffer_.WriteTo(destination); }
void WriteTo(UChar* destination) const { buffer_.WriteTo(destination); }
private:
const StringAppend<StringType1, StringType2>& buffer_;
};
inline StringAppend<const char*, String> operator+(const char* string1,
const String& string2) {
return StringAppend<const char*, String>(string1, string2);
}
inline StringAppend<const char*, AtomicString> operator+(
const char* string1,
const AtomicString& string2) {
return StringAppend<const char*, AtomicString>(string1, string2);
}
inline StringAppend<const char*, StringView> operator+(
const char* string1,
const StringView& string2) {
return StringAppend<const char*, StringView>(string1, string2);
}
template <typename U, typename V>
inline StringAppend<const char*, StringAppend<U, V>> operator+(
const char* string1,
const StringAppend<U, V>& string2) {
return StringAppend<const char*, StringAppend<U, V>>(string1, string2);
}
inline StringAppend<const UChar*, String> operator+(const UChar* string1,
const String& string2) {
return StringAppend<const UChar*, String>(string1, string2);
}
inline StringAppend<const UChar*, AtomicString> operator+(
const UChar* string1,
const AtomicString& string2) {
return StringAppend<const UChar*, AtomicString>(string1, string2);
}
inline StringAppend<const UChar*, StringView> operator+(
const UChar* string1,
const StringView& string2) {
return StringAppend<const UChar*, StringView>(string1, string2);
}
template <typename U, typename V>
inline StringAppend<const UChar*, StringAppend<U, V>> operator+(
const UChar* string1,
const StringAppend<U, V>& string2) {
return StringAppend<const UChar*, StringAppend<U, V>>(string1, string2);
}
template <typename T>
StringAppend<String, T> operator+(const String& string1, T string2) {
return StringAppend<String, T>(string1, string2);
}
template <typename T>
StringAppend<AtomicString, T> operator+(const AtomicString& string1,
T string2) {
return StringAppend<AtomicString, T>(string1, string2);
}
template <typename T>
StringAppend<StringView, T> operator+(const StringView& string1, T string2) {
return StringAppend<StringView, T>(string1, string2);
}
template <typename U, typename V, typename W>
StringAppend<StringAppend<U, V>, W> operator+(const StringAppend<U, V>& string1,
W string2) {
return StringAppend<StringAppend<U, V>, W>(string1, string2);
}
} // namespace WTF
#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_WTF_TEXT_STRING_OPERATORS_H_
| null | null | null | null | 33,895 |
17,497 | null |
train_val
|
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
| 182,492 |
linux
| 0 |
https://github.com/torvalds/linux
|
2017-05-12 08:32:58+10:00
|
/*
* Copyright (C) 2003 Microtronix Datacom Ltd.
* Copyright (C) 2000-2002 Greg Ungerer <gerg@snapgear.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef _ASM_NIOS2_CACHEFLUSH_H
#define _ASM_NIOS2_CACHEFLUSH_H
#include <linux/mm_types.h>
/*
* This flag is used to indicate that the page pointed to by a pte is clean
* and does not require cleaning before returning it to the user.
*/
#define PG_dcache_clean PG_arch_1
struct mm_struct;
extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_dup_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
unsigned long pfn);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
#define flush_cache_vmap(start, end) flush_dcache_range(start, end)
#define flush_cache_vunmap(start, end) flush_dcache_range(start, end)
extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr,
void *dst, void *src, int len);
extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr,
void *dst, void *src, int len);
extern void flush_dcache_range(unsigned long start, unsigned long end);
extern void invalidate_dcache_range(unsigned long start, unsigned long end);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#endif /* _ASM_NIOS2_CACHEFLUSH_H */
| null | null | null | null | 90,839 |
30,877 | null |
train_val
|
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
| 195,872 |
linux
| 0 |
https://github.com/torvalds/linux
|
2017-05-12 08:32:58+10:00
|
/*
* sysfs.c - sysfs support
*
* (C) 2006-2007 Shaohua Li <shaohua.li@intel.com>
*
* This code is licenced under the GPL.
*/
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/kobject.h>
#include "cpuidle.h"
static unsigned int sysfs_switch;
static int __init cpuidle_sysfs_setup(char *unused)
{
sysfs_switch = 1;
return 1;
}
__setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup);
static ssize_t show_available_governors(struct device *dev,
struct device_attribute *attr,
char *buf)
{
ssize_t i = 0;
struct cpuidle_governor *tmp;
mutex_lock(&cpuidle_lock);
list_for_each_entry(tmp, &cpuidle_governors, governor_list) {
if (i >= (ssize_t) ((PAGE_SIZE/sizeof(char)) -
CPUIDLE_NAME_LEN - 2))
goto out;
i += scnprintf(&buf[i], CPUIDLE_NAME_LEN, "%s ", tmp->name);
}
out:
i+= sprintf(&buf[i], "\n");
mutex_unlock(&cpuidle_lock);
return i;
}
static ssize_t show_current_driver(struct device *dev,
struct device_attribute *attr,
char *buf)
{
ssize_t ret;
struct cpuidle_driver *drv;
spin_lock(&cpuidle_driver_lock);
drv = cpuidle_get_driver();
if (drv)
ret = sprintf(buf, "%s\n", drv->name);
else
ret = sprintf(buf, "none\n");
spin_unlock(&cpuidle_driver_lock);
return ret;
}
static ssize_t show_current_governor(struct device *dev,
struct device_attribute *attr,
char *buf)
{
ssize_t ret;
mutex_lock(&cpuidle_lock);
if (cpuidle_curr_governor)
ret = sprintf(buf, "%s\n", cpuidle_curr_governor->name);
else
ret = sprintf(buf, "none\n");
mutex_unlock(&cpuidle_lock);
return ret;
}
static ssize_t store_current_governor(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
char gov_name[CPUIDLE_NAME_LEN];
int ret = -EINVAL;
size_t len = count;
struct cpuidle_governor *gov;
if (!len || len >= sizeof(gov_name))
return -EINVAL;
memcpy(gov_name, buf, len);
gov_name[len] = '\0';
if (gov_name[len - 1] == '\n')
gov_name[--len] = '\0';
mutex_lock(&cpuidle_lock);
list_for_each_entry(gov, &cpuidle_governors, governor_list) {
if (strlen(gov->name) == len && !strcmp(gov->name, gov_name)) {
ret = cpuidle_switch_governor(gov);
break;
}
}
mutex_unlock(&cpuidle_lock);
if (ret)
return ret;
else
return count;
}
static DEVICE_ATTR(current_driver, 0444, show_current_driver, NULL);
static DEVICE_ATTR(current_governor_ro, 0444, show_current_governor, NULL);
static struct attribute *cpuidle_default_attrs[] = {
&dev_attr_current_driver.attr,
&dev_attr_current_governor_ro.attr,
NULL
};
static DEVICE_ATTR(available_governors, 0444, show_available_governors, NULL);
static DEVICE_ATTR(current_governor, 0644, show_current_governor,
store_current_governor);
static struct attribute *cpuidle_switch_attrs[] = {
&dev_attr_available_governors.attr,
&dev_attr_current_driver.attr,
&dev_attr_current_governor.attr,
NULL
};
static struct attribute_group cpuidle_attr_group = {
.attrs = cpuidle_default_attrs,
.name = "cpuidle",
};
/**
* cpuidle_add_interface - add CPU global sysfs attributes
*/
int cpuidle_add_interface(struct device *dev)
{
if (sysfs_switch)
cpuidle_attr_group.attrs = cpuidle_switch_attrs;
return sysfs_create_group(&dev->kobj, &cpuidle_attr_group);
}
/**
* cpuidle_remove_interface - remove CPU global sysfs attributes
*/
void cpuidle_remove_interface(struct device *dev)
{
sysfs_remove_group(&dev->kobj, &cpuidle_attr_group);
}
struct cpuidle_attr {
struct attribute attr;
ssize_t (*show)(struct cpuidle_device *, char *);
ssize_t (*store)(struct cpuidle_device *, const char *, size_t count);
};
#define define_one_ro(_name, show) \
static struct cpuidle_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
#define define_one_rw(_name, show, store) \
static struct cpuidle_attr attr_##_name = __ATTR(_name, 0644, show, store)
#define attr_to_cpuidleattr(a) container_of(a, struct cpuidle_attr, attr)
struct cpuidle_device_kobj {
struct cpuidle_device *dev;
struct completion kobj_unregister;
struct kobject kobj;
};
static inline struct cpuidle_device *to_cpuidle_device(struct kobject *kobj)
{
struct cpuidle_device_kobj *kdev =
container_of(kobj, struct cpuidle_device_kobj, kobj);
return kdev->dev;
}
static ssize_t cpuidle_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
int ret = -EIO;
struct cpuidle_device *dev = to_cpuidle_device(kobj);
struct cpuidle_attr *cattr = attr_to_cpuidleattr(attr);
if (cattr->show) {
mutex_lock(&cpuidle_lock);
ret = cattr->show(dev, buf);
mutex_unlock(&cpuidle_lock);
}
return ret;
}
static ssize_t cpuidle_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
int ret = -EIO;
struct cpuidle_device *dev = to_cpuidle_device(kobj);
struct cpuidle_attr *cattr = attr_to_cpuidleattr(attr);
if (cattr->store) {
mutex_lock(&cpuidle_lock);
ret = cattr->store(dev, buf, count);
mutex_unlock(&cpuidle_lock);
}
return ret;
}
static const struct sysfs_ops cpuidle_sysfs_ops = {
.show = cpuidle_show,
.store = cpuidle_store,
};
static void cpuidle_sysfs_release(struct kobject *kobj)
{
struct cpuidle_device_kobj *kdev =
container_of(kobj, struct cpuidle_device_kobj, kobj);
complete(&kdev->kobj_unregister);
}
static struct kobj_type ktype_cpuidle = {
.sysfs_ops = &cpuidle_sysfs_ops,
.release = cpuidle_sysfs_release,
};
struct cpuidle_state_attr {
struct attribute attr;
ssize_t (*show)(struct cpuidle_state *, \
struct cpuidle_state_usage *, char *);
ssize_t (*store)(struct cpuidle_state *, \
struct cpuidle_state_usage *, const char *, size_t);
};
#define define_one_state_ro(_name, show) \
static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
#define define_one_state_rw(_name, show, store) \
static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0644, show, store)
#define define_show_state_function(_name) \
static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, char *buf) \
{ \
return sprintf(buf, "%u\n", state->_name);\
}
#define define_store_state_ull_function(_name) \
static ssize_t store_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
const char *buf, size_t size) \
{ \
unsigned long long value; \
int err; \
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
err = kstrtoull(buf, 0, &value); \
if (err) \
return err; \
if (value) \
state_usage->_name = 1; \
else \
state_usage->_name = 0; \
return size; \
}
#define define_show_state_ull_function(_name) \
static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
return sprintf(buf, "%llu\n", state_usage->_name);\
}
#define define_show_state_str_function(_name) \
static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
if (state->_name[0] == '\0')\
return sprintf(buf, "<null>\n");\
return sprintf(buf, "%s\n", state->_name);\
}
define_show_state_function(exit_latency)
define_show_state_function(target_residency)
define_show_state_function(power_usage)
define_show_state_ull_function(usage)
define_show_state_ull_function(time)
define_show_state_str_function(name)
define_show_state_str_function(desc)
define_show_state_ull_function(disable)
define_store_state_ull_function(disable)
define_one_state_ro(name, show_state_name);
define_one_state_ro(desc, show_state_desc);
define_one_state_ro(latency, show_state_exit_latency);
define_one_state_ro(residency, show_state_target_residency);
define_one_state_ro(power, show_state_power_usage);
define_one_state_ro(usage, show_state_usage);
define_one_state_ro(time, show_state_time);
define_one_state_rw(disable, show_state_disable, store_state_disable);
static struct attribute *cpuidle_state_default_attrs[] = {
&attr_name.attr,
&attr_desc.attr,
&attr_latency.attr,
&attr_residency.attr,
&attr_power.attr,
&attr_usage.attr,
&attr_time.attr,
&attr_disable.attr,
NULL
};
struct cpuidle_state_kobj {
struct cpuidle_state *state;
struct cpuidle_state_usage *state_usage;
struct completion kobj_unregister;
struct kobject kobj;
};
#define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj)
#define kobj_to_state(k) (kobj_to_state_obj(k)->state)
#define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage)
#define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr)
static ssize_t cpuidle_state_show(struct kobject *kobj, struct attribute *attr,
char * buf)
{
int ret = -EIO;
struct cpuidle_state *state = kobj_to_state(kobj);
struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj);
struct cpuidle_state_attr * cattr = attr_to_stateattr(attr);
if (cattr->show)
ret = cattr->show(state, state_usage, buf);
return ret;
}
static ssize_t cpuidle_state_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t size)
{
int ret = -EIO;
struct cpuidle_state *state = kobj_to_state(kobj);
struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj);
struct cpuidle_state_attr *cattr = attr_to_stateattr(attr);
if (cattr->store)
ret = cattr->store(state, state_usage, buf, size);
return ret;
}
static const struct sysfs_ops cpuidle_state_sysfs_ops = {
.show = cpuidle_state_show,
.store = cpuidle_state_store,
};
static void cpuidle_state_sysfs_release(struct kobject *kobj)
{
struct cpuidle_state_kobj *state_obj = kobj_to_state_obj(kobj);
complete(&state_obj->kobj_unregister);
}
static struct kobj_type ktype_state_cpuidle = {
.sysfs_ops = &cpuidle_state_sysfs_ops,
.default_attrs = cpuidle_state_default_attrs,
.release = cpuidle_state_sysfs_release,
};
static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
{
kobject_put(&device->kobjs[i]->kobj);
wait_for_completion(&device->kobjs[i]->kobj_unregister);
kfree(device->kobjs[i]);
device->kobjs[i] = NULL;
}
/**
* cpuidle_add_state_sysfs - adds cpuidle states sysfs attributes
* @device: the target device
*/
static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
{
int i, ret = -ENOMEM;
struct cpuidle_state_kobj *kobj;
struct cpuidle_device_kobj *kdev = device->kobj_dev;
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
/* state statistics */
for (i = 0; i < drv->state_count; i++) {
kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
if (!kobj) {
ret = -ENOMEM;
goto error_state;
}
kobj->state = &drv->states[i];
kobj->state_usage = &device->states_usage[i];
init_completion(&kobj->kobj_unregister);
ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle,
&kdev->kobj, "state%d", i);
if (ret) {
kfree(kobj);
goto error_state;
}
kobject_uevent(&kobj->kobj, KOBJ_ADD);
device->kobjs[i] = kobj;
}
return 0;
error_state:
for (i = i - 1; i >= 0; i--)
cpuidle_free_state_kobj(device, i);
return ret;
}
/**
 * cpuidle_remove_state_sysfs - removes the cpuidle states sysfs attributes
* @device: the target device
*/
static void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
{
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
int i;
for (i = 0; i < drv->state_count; i++)
cpuidle_free_state_kobj(device, i);
}
#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
#define kobj_to_driver_kobj(k) container_of(k, struct cpuidle_driver_kobj, kobj)
#define attr_to_driver_attr(a) container_of(a, struct cpuidle_driver_attr, attr)
#define define_one_driver_ro(_name, show) \
static struct cpuidle_driver_attr attr_driver_##_name = \
__ATTR(_name, 0444, show, NULL)
struct cpuidle_driver_kobj {
struct cpuidle_driver *drv;
struct completion kobj_unregister;
struct kobject kobj;
};
struct cpuidle_driver_attr {
struct attribute attr;
ssize_t (*show)(struct cpuidle_driver *, char *);
ssize_t (*store)(struct cpuidle_driver *, const char *, size_t);
};
static ssize_t show_driver_name(struct cpuidle_driver *drv, char *buf)
{
ssize_t ret;
spin_lock(&cpuidle_driver_lock);
ret = sprintf(buf, "%s\n", drv ? drv->name : "none");
spin_unlock(&cpuidle_driver_lock);
return ret;
}
static void cpuidle_driver_sysfs_release(struct kobject *kobj)
{
struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj);
complete(&driver_kobj->kobj_unregister);
}
static ssize_t cpuidle_driver_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
int ret = -EIO;
struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj);
struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr);
if (dattr->show)
ret = dattr->show(driver_kobj->drv, buf);
return ret;
}
static ssize_t cpuidle_driver_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t size)
{
int ret = -EIO;
struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj);
struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr);
if (dattr->store)
ret = dattr->store(driver_kobj->drv, buf, size);
return ret;
}
define_one_driver_ro(name, show_driver_name);
static const struct sysfs_ops cpuidle_driver_sysfs_ops = {
.show = cpuidle_driver_show,
.store = cpuidle_driver_store,
};
static struct attribute *cpuidle_driver_default_attrs[] = {
&attr_driver_name.attr,
NULL
};
static struct kobj_type ktype_driver_cpuidle = {
.sysfs_ops = &cpuidle_driver_sysfs_ops,
.default_attrs = cpuidle_driver_default_attrs,
.release = cpuidle_driver_sysfs_release,
};
/**
* cpuidle_add_driver_sysfs - adds the driver name sysfs attribute
 * @dev: the target device
*/
static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
{
struct cpuidle_driver_kobj *kdrv;
struct cpuidle_device_kobj *kdev = dev->kobj_dev;
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
int ret;
kdrv = kzalloc(sizeof(*kdrv), GFP_KERNEL);
if (!kdrv)
return -ENOMEM;
kdrv->drv = drv;
init_completion(&kdrv->kobj_unregister);
ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle,
&kdev->kobj, "driver");
if (ret) {
kfree(kdrv);
return ret;
}
kobject_uevent(&kdrv->kobj, KOBJ_ADD);
dev->kobj_driver = kdrv;
return ret;
}
/**
* cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute
 * @dev: the target device
*/
static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev)
{
struct cpuidle_driver_kobj *kdrv = dev->kobj_driver;
kobject_put(&kdrv->kobj);
wait_for_completion(&kdrv->kobj_unregister);
kfree(kdrv);
}
#else
static inline int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
{
return 0;
}
static inline void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev)
{
}
#endif
/**
* cpuidle_add_device_sysfs - adds device specific sysfs attributes
* @device: the target device
*/
int cpuidle_add_device_sysfs(struct cpuidle_device *device)
{
int ret;
ret = cpuidle_add_state_sysfs(device);
if (ret)
return ret;
ret = cpuidle_add_driver_sysfs(device);
if (ret)
cpuidle_remove_state_sysfs(device);
return ret;
}
/**
 * cpuidle_remove_device_sysfs - removes device specific sysfs attributes
 * @device: the target device
*/
void cpuidle_remove_device_sysfs(struct cpuidle_device *device)
{
cpuidle_remove_driver_sysfs(device);
cpuidle_remove_state_sysfs(device);
}
/**
* cpuidle_add_sysfs - creates a sysfs instance for the target device
* @dev: the target device
*/
int cpuidle_add_sysfs(struct cpuidle_device *dev)
{
struct cpuidle_device_kobj *kdev;
struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
int error;
/*
* Return if cpu_device is not setup for this CPU.
*
* This could happen if the arch did not set up cpu_device
* since this CPU is not in cpu_present mask and the
* driver did not send a correct CPU mask during registration.
	 * Without this check we would end up passing a bogus
* value for &cpu_dev->kobj in kobject_init_and_add()
*/
if (!cpu_dev)
return -ENODEV;
kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
if (!kdev)
return -ENOMEM;
kdev->dev = dev;
dev->kobj_dev = kdev;
init_completion(&kdev->kobj_unregister);
error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
"cpuidle");
if (error) {
kfree(kdev);
return error;
}
kobject_uevent(&kdev->kobj, KOBJ_ADD);
return 0;
}
/**
* cpuidle_remove_sysfs - deletes a sysfs instance on the target device
* @dev: the target device
*/
void cpuidle_remove_sysfs(struct cpuidle_device *dev)
{
struct cpuidle_device_kobj *kdev = dev->kobj_dev;
kobject_put(&kdev->kobj);
wait_for_completion(&kdev->kobj_unregister);
kfree(kdev);
}
| null | null | null | null | 104,219 |
10,276 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 175,271 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
#ifndef __ASM_ARM_CACHETYPE_H
#define __ASM_ARM_CACHETYPE_H
#define CACHEID_VIVT (1 << 0)
#define CACHEID_VIPT_NONALIASING (1 << 1)
#define CACHEID_VIPT_ALIASING (1 << 2)
#define CACHEID_VIPT (CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
#define CACHEID_ASID_TAGGED (1 << 3)
#define CACHEID_VIPT_I_ALIASING (1 << 4)
#define CACHEID_PIPT (1 << 5)
extern unsigned int cacheid;
#define cache_is_vivt() cacheid_is(CACHEID_VIVT)
#define cache_is_vipt() cacheid_is(CACHEID_VIPT)
#define cache_is_vipt_nonaliasing() cacheid_is(CACHEID_VIPT_NONALIASING)
#define cache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_ALIASING)
#define icache_is_vivt_asid_tagged() cacheid_is(CACHEID_ASID_TAGGED)
#define icache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_I_ALIASING)
#define icache_is_pipt() cacheid_is(CACHEID_PIPT)
/*
* __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
* Mask out support which will never be present on newer CPUs.
* - v6+ is never VIVT
* - v7+ VIPT never aliases on D-side
*/
#if __LINUX_ARM_ARCH__ >= 7
#define __CACHEID_ARCH_MIN (CACHEID_VIPT_NONALIASING |\
CACHEID_ASID_TAGGED |\
CACHEID_VIPT_I_ALIASING |\
CACHEID_PIPT)
#elif __LINUX_ARM_ARCH__ >= 6
#define __CACHEID_ARCH_MIN (~CACHEID_VIVT)
#else
#define __CACHEID_ARCH_MIN (~0)
#endif
/*
* Mask out support which isn't configured
*/
#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
#define __CACHEID_ALWAYS (CACHEID_VIVT)
#define __CACHEID_NEVER (~CACHEID_VIVT)
#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
#define __CACHEID_ALWAYS (0)
#define __CACHEID_NEVER (CACHEID_VIVT)
#else
#define __CACHEID_ALWAYS (0)
#define __CACHEID_NEVER (0)
#endif
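/*
 * cacheid_is(mask) reports which of the requested cache properties can be
 * present on this kernel: bits that are always true for the configuration
 * are kept unconditionally, bits that can never be true are masked out, and
 * the remainder is taken from the run-time detected 'cacheid', limited to
 * what the minimum supported architecture allows.
 */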
static inline unsigned int __attribute__((pure)) cacheid_is(unsigned int mask)
{
return (__CACHEID_ALWAYS & mask) |
(~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid);
}
#define CSSELR_ICACHE 1
#define CSSELR_DCACHE 0
#define CSSELR_L1 (0 << 1)
#define CSSELR_L2 (1 << 1)
#define CSSELR_L3 (2 << 1)
#define CSSELR_L4 (3 << 1)
#define CSSELR_L5 (4 << 1)
#define CSSELR_L6 (5 << 1)
#define CSSELR_L7 (6 << 1)
#ifndef CONFIG_CPU_V7M
static inline void set_csselr(unsigned int cache_selector)
{
asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (cache_selector));
}
static inline unsigned int read_ccsidr(void)
{
unsigned int val;
asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (val));
return val;
}
#else /* CONFIG_CPU_V7M */
#include <linux/io.h>
#include "asm/v7m.h"
static inline void set_csselr(unsigned int cache_selector)
{
writel(cache_selector, BASEADDR_V7M_SCB + V7M_SCB_CTR);
}
static inline unsigned int read_ccsidr(void)
{
return readl(BASEADDR_V7M_SCB + V7M_SCB_CCSIDR);
}
#endif
#endif
| null | null | null | null | 83,618 |
22,215 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 22,215 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/browser/host_zoom_map_impl.h"
#include <algorithm>
#include <cmath>
#include <memory>
#include <utility>
#include "base/strings/string_piece.h"
#include "base/strings/utf_string_conversions.h"
#include "base/time/default_clock.h"
#include "base/values.h"
#include "content/browser/frame_host/navigation_entry_impl.h"
#include "content/browser/renderer_host/render_process_host_impl.h"
#include "content/browser/renderer_host/render_view_host_impl.h"
#include "content/browser/web_contents/web_contents_impl.h"
#include "content/common/view_messages.h"
#include "content/public/browser/browser_context.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/browser/render_frame_host.h"
#include "content/public/browser/resource_context.h"
#include "content/public/browser/site_instance.h"
#include "content/public/browser/storage_partition.h"
#include "content/public/common/page_zoom.h"
#include "content/public/common/url_constants.h"
#include "net/base/url_util.h"
namespace content {
namespace {
std::string GetHostFromProcessView(int render_process_id, int render_view_id) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
RenderViewHost* render_view_host =
RenderViewHost::FromID(render_process_id, render_view_id);
if (!render_view_host)
return std::string();
WebContents* web_contents = WebContents::FromRenderViewHost(render_view_host);
NavigationEntry* entry =
web_contents->GetController().GetLastCommittedEntry();
if (!entry)
return std::string();
return net::GetHostOrSpecFromURL(HostZoomMap::GetURLFromEntry(entry));
}
} // namespace
GURL HostZoomMap::GetURLFromEntry(const NavigationEntry* entry) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
switch (entry->GetPageType()) {
case PAGE_TYPE_ERROR:
return GURL(kUnreachableWebDataURL);
// TODO(wjmaclean): In future, give interstitial pages special treatment as
// well.
default:
return entry->GetURL();
}
}
HostZoomMap* HostZoomMap::GetDefaultForBrowserContext(BrowserContext* context) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
StoragePartition* partition =
BrowserContext::GetDefaultStoragePartition(context);
DCHECK(partition);
return partition->GetHostZoomMap();
}
HostZoomMap* HostZoomMap::Get(SiteInstance* instance) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
StoragePartition* partition = BrowserContext::GetStoragePartition(
instance->GetBrowserContext(), instance);
DCHECK(partition);
return partition->GetHostZoomMap();
}
HostZoomMap* HostZoomMap::GetForWebContents(const WebContents* contents) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
// TODO(wjmaclean): Update this behaviour to work with OOPIF.
// See crbug.com/528407.
StoragePartition* partition =
BrowserContext::GetStoragePartition(contents->GetBrowserContext(),
contents->GetSiteInstance());
DCHECK(partition);
return partition->GetHostZoomMap();
}
// Helper function for setting/getting zoom levels for WebContents without
// having to import HostZoomMapImpl everywhere.
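// A minimal usage sketch (hypothetical call site, for illustration only):
//   double level = HostZoomMap::GetZoomLevel(web_contents);
//   HostZoomMap::SetZoomLevel(web_contents, level + 0.5);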
double HostZoomMap::GetZoomLevel(const WebContents* web_contents) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
HostZoomMapImpl* host_zoom_map = static_cast<HostZoomMapImpl*>(
HostZoomMap::GetForWebContents(web_contents));
return host_zoom_map->GetZoomLevelForWebContents(
*static_cast<const WebContentsImpl*>(web_contents));
}
bool HostZoomMap::PageScaleFactorIsOne(const WebContents* web_contents) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
HostZoomMapImpl* host_zoom_map = static_cast<HostZoomMapImpl*>(
HostZoomMap::GetForWebContents(web_contents));
return host_zoom_map->PageScaleFactorIsOneForWebContents(
*static_cast<const WebContentsImpl*>(web_contents));
}
void HostZoomMap::SetZoomLevel(const WebContents* web_contents, double level) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
HostZoomMapImpl* host_zoom_map = static_cast<HostZoomMapImpl*>(
HostZoomMap::GetForWebContents(web_contents));
host_zoom_map->SetZoomLevelForWebContents(
*static_cast<const WebContentsImpl*>(web_contents), level);
}
void HostZoomMap::SendErrorPageZoomLevelRefresh(
const WebContents* web_contents) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
HostZoomMapImpl* host_zoom_map =
static_cast<HostZoomMapImpl*>(HostZoomMap::GetDefaultForBrowserContext(
web_contents->GetBrowserContext()));
host_zoom_map->SendErrorPageZoomLevelRefresh();
}
HostZoomMapImpl::HostZoomMapImpl()
: default_zoom_level_(0.0),
clock_(base::DefaultClock::GetInstance()) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
}
void HostZoomMapImpl::CopyFrom(HostZoomMap* copy_interface) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
HostZoomMapImpl* copy = static_cast<HostZoomMapImpl*>(copy_interface);
host_zoom_levels_.insert(copy->host_zoom_levels_.begin(),
copy->host_zoom_levels_.end());
for (const auto& it : copy->scheme_host_zoom_levels_) {
const std::string& host = it.first;
scheme_host_zoom_levels_[host] = HostZoomLevels();
scheme_host_zoom_levels_[host].insert(it.second.begin(), it.second.end());
}
default_zoom_level_ = copy->default_zoom_level_;
}
double HostZoomMapImpl::GetZoomLevelForHost(const std::string& host) const {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
const auto it = host_zoom_levels_.find(host);
return it != host_zoom_levels_.end() ? it->second.level : default_zoom_level_;
}
bool HostZoomMapImpl::HasZoomLevel(const std::string& scheme,
const std::string& host) const {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
SchemeHostZoomLevels::const_iterator scheme_iterator(
scheme_host_zoom_levels_.find(scheme));
const HostZoomLevels& zoom_levels =
(scheme_iterator != scheme_host_zoom_levels_.end())
? scheme_iterator->second
: host_zoom_levels_;
return base::ContainsKey(zoom_levels, host);
}
double HostZoomMapImpl::GetZoomLevelForHostAndScheme(
const std::string& scheme,
const std::string& host) const {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
SchemeHostZoomLevels::const_iterator scheme_iterator(
scheme_host_zoom_levels_.find(scheme));
if (scheme_iterator != scheme_host_zoom_levels_.end()) {
HostZoomLevels::const_iterator i(scheme_iterator->second.find(host));
if (i != scheme_iterator->second.end())
return i->second.level;
}
return GetZoomLevelForHost(host);
}
HostZoomMap::ZoomLevelVector HostZoomMapImpl::GetAllZoomLevels() const {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
HostZoomMap::ZoomLevelVector result;
result.reserve(host_zoom_levels_.size() + scheme_host_zoom_levels_.size());
for (const auto& entry : host_zoom_levels_) {
ZoomLevelChange change = {
HostZoomMap::ZOOM_CHANGED_FOR_HOST,
entry.first, // host
std::string(), // scheme
entry.second.level, // zoom level
entry.second.last_modified // last modified
};
result.push_back(change);
}
for (const auto& scheme_entry : scheme_host_zoom_levels_) {
const std::string& scheme = scheme_entry.first;
const HostZoomLevels& host_zoom_levels = scheme_entry.second;
for (const auto& entry : host_zoom_levels) {
ZoomLevelChange change = {
HostZoomMap::ZOOM_CHANGED_FOR_SCHEME_AND_HOST,
entry.first, // host
scheme, // scheme
entry.second.level, // zoom level
entry.second.last_modified // last modified
};
result.push_back(change);
}
}
return result;
}
void HostZoomMapImpl::SetZoomLevelForHost(const std::string& host,
double level) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
base::Time last_modified = clock_->Now();
SetZoomLevelForHostInternal(host, level, last_modified);
}
void HostZoomMapImpl::InitializeZoomLevelForHost(const std::string& host,
double level,
base::Time last_modified) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
SetZoomLevelForHostInternal(host, level, last_modified);
}
void HostZoomMapImpl::SetZoomLevelForHostInternal(const std::string& host,
double level,
base::Time last_modified) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
if (ZoomValuesEqual(level, default_zoom_level_)) {
host_zoom_levels_.erase(host);
} else {
ZoomLevel& zoomLevel = host_zoom_levels_[host];
zoomLevel.level = level;
zoomLevel.last_modified = last_modified;
}
// TODO(wjmaclean) Should we use a GURL here? crbug.com/384486
SendZoomLevelChange(std::string(), host, level);
HostZoomMap::ZoomLevelChange change;
change.mode = HostZoomMap::ZOOM_CHANGED_FOR_HOST;
change.host = host;
change.zoom_level = level;
change.last_modified = last_modified;
zoom_level_changed_callbacks_.Notify(change);
}
void HostZoomMapImpl::SetZoomLevelForHostAndScheme(const std::string& scheme,
const std::string& host,
double level) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
// No last_modified timestamp for scheme and host because they are
  // not persisted and are used for special cases only.
scheme_host_zoom_levels_[scheme][host].level = level;
SendZoomLevelChange(scheme, host, level);
HostZoomMap::ZoomLevelChange change;
change.mode = HostZoomMap::ZOOM_CHANGED_FOR_SCHEME_AND_HOST;
change.host = host;
change.scheme = scheme;
change.zoom_level = level;
change.last_modified = base::Time();
zoom_level_changed_callbacks_.Notify(change);
}
double HostZoomMapImpl::GetDefaultZoomLevel() const {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
return default_zoom_level_;
}
void HostZoomMapImpl::SetDefaultZoomLevel(double level) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
if (ZoomValuesEqual(level, default_zoom_level_))
return;
default_zoom_level_ = level;
// First, remove all entries that match the new default zoom level.
for (auto it = host_zoom_levels_.begin(); it != host_zoom_levels_.end();) {
if (ZoomValuesEqual(it->second.level, default_zoom_level_))
it = host_zoom_levels_.erase(it);
else
it++;
}
// Second, update zoom levels for all pages that do not have an overriding
// entry.
for (auto* web_contents : WebContentsImpl::GetAllWebContents()) {
// Only change zoom for WebContents tied to the StoragePartition this
// HostZoomMap serves.
if (GetForWebContents(web_contents) != this)
continue;
int render_process_id =
web_contents->GetRenderViewHost()->GetProcess()->GetID();
int render_view_id = web_contents->GetRenderViewHost()->GetRoutingID();
// Get the url from the navigation controller directly, as calling
// WebContentsImpl::GetLastCommittedURL() may give us a virtual url that
// is different than the one stored in the map.
GURL url;
std::string host;
std::string scheme;
NavigationEntry* entry =
web_contents->GetController().GetLastCommittedEntry();
    // It is possible for a WebContents' zoom level to be queried before
// a navigation has occurred.
if (entry) {
url = GetURLFromEntry(entry);
scheme = url.scheme();
host = net::GetHostOrSpecFromURL(url);
}
bool uses_default_zoom =
!HasZoomLevel(scheme, host) &&
!UsesTemporaryZoomLevel(render_process_id, render_view_id);
if (uses_default_zoom) {
web_contents->UpdateZoom(level);
HostZoomMap::ZoomLevelChange change;
change.mode = HostZoomMap::ZOOM_CHANGED_FOR_HOST;
change.host = host;
change.zoom_level = level;
zoom_level_changed_callbacks_.Notify(change);
}
}
}
std::unique_ptr<HostZoomMap::Subscription>
HostZoomMapImpl::AddZoomLevelChangedCallback(
const ZoomLevelChangedCallback& callback) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
return zoom_level_changed_callbacks_.Add(callback);
}
double HostZoomMapImpl::GetZoomLevelForWebContents(
const WebContentsImpl& web_contents_impl) const {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
int render_process_id =
web_contents_impl.GetRenderViewHost()->GetProcess()->GetID();
int routing_id = web_contents_impl.GetRenderViewHost()->GetRoutingID();
if (UsesTemporaryZoomLevel(render_process_id, routing_id))
return GetTemporaryZoomLevel(render_process_id, routing_id);
// Get the url from the navigation controller directly, as calling
// WebContentsImpl::GetLastCommittedURL() may give us a virtual url that
  // is different than the one stored in the map.
GURL url;
NavigationEntry* entry =
web_contents_impl.GetController().GetLastCommittedEntry();
  // It is possible for a WebContents' zoom level to be queried before
// a navigation has occurred.
if (entry)
url = GetURLFromEntry(entry);
return GetZoomLevelForHostAndScheme(url.scheme(),
net::GetHostOrSpecFromURL(url));
}
void HostZoomMapImpl::SetZoomLevelForWebContents(
const WebContentsImpl& web_contents_impl,
double level) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
int render_process_id =
web_contents_impl.GetRenderViewHost()->GetProcess()->GetID();
int render_view_id = web_contents_impl.GetRenderViewHost()->GetRoutingID();
if (UsesTemporaryZoomLevel(render_process_id, render_view_id)) {
SetTemporaryZoomLevel(render_process_id, render_view_id, level);
} else {
// Get the url from the navigation controller directly, as calling
// WebContentsImpl::GetLastCommittedURL() may give us a virtual url that
// is different than what the render view is using. If the two don't match,
// the attempt to set the zoom will fail.
NavigationEntry* entry =
web_contents_impl.GetController().GetLastCommittedEntry();
// Tests may invoke this function with a null entry, but we don't
// want to save zoom levels in this case.
if (!entry)
return;
GURL url = GetURLFromEntry(entry);
SetZoomLevelForHost(net::GetHostOrSpecFromURL(url), level);
}
}
void HostZoomMapImpl::SetZoomLevelForView(int render_process_id,
int render_view_id,
double level,
const std::string& host) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
if (UsesTemporaryZoomLevel(render_process_id, render_view_id))
SetTemporaryZoomLevel(render_process_id, render_view_id, level);
else
SetZoomLevelForHost(host, level);
}
void HostZoomMapImpl::SetPageScaleFactorIsOneForView(int render_process_id,
int render_view_id,
bool is_one) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
view_page_scale_factors_are_one_[RenderViewKey(render_process_id,
render_view_id)] = is_one;
HostZoomMap::ZoomLevelChange change;
change.mode = HostZoomMap::PAGE_SCALE_IS_ONE_CHANGED;
zoom_level_changed_callbacks_.Notify(change);
}
bool HostZoomMapImpl::PageScaleFactorIsOneForWebContents(
const WebContentsImpl& web_contents_impl) const {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
if (!web_contents_impl.GetRenderViewHost()->GetProcess())
return true;
const auto it = view_page_scale_factors_are_one_.find(RenderViewKey(
web_contents_impl.GetRenderViewHost()->GetProcess()->GetID(),
web_contents_impl.GetRenderViewHost()->GetRoutingID()));
return it != view_page_scale_factors_are_one_.end() ? it->second : true;
}
void HostZoomMapImpl::ClearPageScaleFactorIsOneForView(int render_process_id,
int render_view_id) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
view_page_scale_factors_are_one_.erase(
RenderViewKey(render_process_id, render_view_id));
}
bool HostZoomMapImpl::UsesTemporaryZoomLevel(int render_process_id,
int render_view_id) const {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
RenderViewKey key(render_process_id, render_view_id);
return base::ContainsKey(temporary_zoom_levels_, key);
}
double HostZoomMapImpl::GetTemporaryZoomLevel(int render_process_id,
int render_view_id) const {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
RenderViewKey key(render_process_id, render_view_id);
const auto it = temporary_zoom_levels_.find(key);
return it != temporary_zoom_levels_.end() ? it->second : 0;
}
void HostZoomMapImpl::SetTemporaryZoomLevel(int render_process_id,
int render_view_id,
double level) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
RenderViewKey key(render_process_id, render_view_id);
temporary_zoom_levels_[key] = level;
WebContentsImpl* web_contents =
static_cast<WebContentsImpl*>(WebContents::FromRenderViewHost(
RenderViewHost::FromID(render_process_id, render_view_id)));
web_contents->SetTemporaryZoomLevel(level, true);
HostZoomMap::ZoomLevelChange change;
change.mode = HostZoomMap::ZOOM_CHANGED_TEMPORARY_ZOOM;
change.host = GetHostFromProcessView(render_process_id, render_view_id);
change.zoom_level = level;
zoom_level_changed_callbacks_.Notify(change);
}
double HostZoomMapImpl::GetZoomLevelForView(const GURL& url,
int render_process_id,
int render_view_id) const {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
RenderViewKey key(render_process_id, render_view_id);
if (base::ContainsKey(temporary_zoom_levels_, key))
return temporary_zoom_levels_.find(key)->second;
return GetZoomLevelForHostAndScheme(url.scheme(),
net::GetHostOrSpecFromURL(url));
}
void HostZoomMapImpl::ClearZoomLevels(base::Time delete_begin,
base::Time delete_end) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
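  // Reset every per-host (scheme-less) zoom level whose last modification
  // falls within [delete_begin, delete_end) back to the default level; a
  // null delete_end means there is no upper bound.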
double default_zoom_level = GetDefaultZoomLevel();
for (const auto& zoom_level : GetAllZoomLevels()) {
if (zoom_level.scheme.empty() && delete_begin <= zoom_level.last_modified &&
(delete_end.is_null() || zoom_level.last_modified < delete_end)) {
SetZoomLevelForHost(zoom_level.host, default_zoom_level);
}
}
}
void HostZoomMapImpl::ClearTemporaryZoomLevel(int render_process_id,
int render_view_id) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
RenderViewKey key(render_process_id, render_view_id);
TemporaryZoomLevels::iterator it = temporary_zoom_levels_.find(key);
if (it == temporary_zoom_levels_.end())
return;
temporary_zoom_levels_.erase(it);
WebContentsImpl* web_contents =
static_cast<WebContentsImpl*>(WebContents::FromRenderViewHost(
RenderViewHost::FromID(render_process_id, render_view_id)));
web_contents->SetTemporaryZoomLevel(GetZoomLevelForHost(
GetHostFromProcessView(render_process_id, render_view_id)), false);
}
void HostZoomMapImpl::SendZoomLevelChange(const std::string& scheme,
const std::string& host,
double level) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
// We'll only send to WebContents not using temporary zoom levels. The one
// other case of interest is where the renderer is hosting a plugin document;
// that should be reflected in our temporary zoom level map, but we will
// double check on the renderer side to avoid the possibility of any races.
for (auto* web_contents : WebContentsImpl::GetAllWebContents()) {
// Only send zoom level changes to WebContents that are using this
// HostZoomMap.
if (GetForWebContents(web_contents) != this)
continue;
int render_process_id =
web_contents->GetRenderViewHost()->GetProcess()->GetID();
int render_view_id = web_contents->GetRenderViewHost()->GetRoutingID();
if (!UsesTemporaryZoomLevel(render_process_id, render_view_id))
web_contents->UpdateZoomIfNecessary(scheme, host, level);
}
}
void HostZoomMapImpl::SendErrorPageZoomLevelRefresh() {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
GURL error_url(kUnreachableWebDataURL);
std::string host = net::GetHostOrSpecFromURL(error_url);
double error_page_zoom_level = GetZoomLevelForHost(host);
SendZoomLevelChange(std::string(), host, error_page_zoom_level);
}
void HostZoomMapImpl::WillCloseRenderView(int render_process_id,
int render_view_id) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
ClearTemporaryZoomLevel(render_process_id, render_view_id);
ClearPageScaleFactorIsOneForView(render_process_id, render_view_id);
}
HostZoomMapImpl::~HostZoomMapImpl() {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
}
void HostZoomMapImpl::SetClockForTesting(base::Clock* clock) {
clock_ = clock;
}
} // namespace content
| null | null | null | null | 19,078 |
46,607 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 46,607 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef ASH_SYSTEM_AUDIO_AUDIO_DETAILED_VIEW_H_
#define ASH_SYSTEM_AUDIO_AUDIO_DETAILED_VIEW_H_
#include <map>
#include "ash/system/tray/tray_details_view.h"
#include "base/macros.h"
#include "chromeos/audio/audio_device.h"
namespace gfx {
struct VectorIcon;
}
namespace ash {
namespace tray {
class AudioDetailedView : public TrayDetailsView {
public:
explicit AudioDetailedView(SystemTrayItem* owner);
~AudioDetailedView() override;
void Update();
private:
// Helper function to add non-clickable header rows within the scrollable
// list.
void AddAudioSubHeader(const gfx::VectorIcon& icon, int text_id);
void CreateItems();
void UpdateScrollableList();
void UpdateAudioDevices();
// TrayDetailsView:
void HandleViewClicked(views::View* view) override;
typedef std::map<views::View*, chromeos::AudioDevice> AudioDeviceMap;
chromeos::AudioDeviceList output_devices_;
chromeos::AudioDeviceList input_devices_;
AudioDeviceMap device_map_;
DISALLOW_COPY_AND_ASSIGN(AudioDetailedView);
};
} // namespace tray
} // namespace ash
#endif // ASH_SYSTEM_AUDIO_AUDIO_DETAILED_VIEW_H_
| null | null | null | null | 43,470 |
5,591 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 170,586 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/*
* rx51.c -- SoC audio for Nokia RX-51
*
* Copyright (C) 2008 - 2009 Nokia Corporation
*
* Contact: Peter Ujfalusi <peter.ujfalusi@ti.com>
* Eduardo Valentin <eduardo.valentin@nokia.com>
* Jarkko Nikula <jarkko.nikula@bitmer.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/jack.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <linux/platform_data/asoc-ti-mcbsp.h>
#include <asm/mach-types.h>
#include "omap-mcbsp.h"
enum {
RX51_JACK_DISABLED,
RX51_JACK_TVOUT, /* tv-out with stereo output */
RX51_JACK_HP, /* headphone: stereo output, no mic */
RX51_JACK_HS, /* headset: stereo output with mic */
};
struct rx51_audio_pdata {
struct gpio_desc *tvout_selection_gpio;
struct gpio_desc *jack_detection_gpio;
struct gpio_desc *eci_sw_gpio;
struct gpio_desc *speaker_amp_gpio;
};
static int rx51_spk_func;
static int rx51_dmic_func;
static int rx51_jack_func;
static void rx51_ext_control(struct snd_soc_dapm_context *dapm)
{
struct snd_soc_card *card = dapm->card;
struct rx51_audio_pdata *pdata = snd_soc_card_get_drvdata(card);
int hp = 0, hs = 0, tvout = 0;
switch (rx51_jack_func) {
case RX51_JACK_TVOUT:
tvout = 1;
hp = 1;
break;
case RX51_JACK_HS:
hs = 1;
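		/* fall through: a headset also drives the headphone path */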
case RX51_JACK_HP:
hp = 1;
break;
}
snd_soc_dapm_mutex_lock(dapm);
if (rx51_spk_func)
snd_soc_dapm_enable_pin_unlocked(dapm, "Ext Spk");
else
snd_soc_dapm_disable_pin_unlocked(dapm, "Ext Spk");
if (rx51_dmic_func)
snd_soc_dapm_enable_pin_unlocked(dapm, "DMic");
else
snd_soc_dapm_disable_pin_unlocked(dapm, "DMic");
if (hp)
snd_soc_dapm_enable_pin_unlocked(dapm, "Headphone Jack");
else
snd_soc_dapm_disable_pin_unlocked(dapm, "Headphone Jack");
if (hs)
snd_soc_dapm_enable_pin_unlocked(dapm, "HS Mic");
else
snd_soc_dapm_disable_pin_unlocked(dapm, "HS Mic");
gpiod_set_value(pdata->tvout_selection_gpio, tvout);
snd_soc_dapm_sync_unlocked(dapm);
snd_soc_dapm_mutex_unlock(dapm);
}
static int rx51_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_card *card = rtd->card;
snd_pcm_hw_constraint_single(runtime, SNDRV_PCM_HW_PARAM_CHANNELS, 2);
rx51_ext_control(&card->dapm);
return 0;
}
static int rx51_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
/* Set the codec system clock for DAC and ADC */
return snd_soc_dai_set_sysclk(codec_dai, 0, 19200000,
SND_SOC_CLOCK_IN);
}
static struct snd_soc_ops rx51_ops = {
.startup = rx51_startup,
.hw_params = rx51_hw_params,
};
static int rx51_get_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.enumerated.item[0] = rx51_spk_func;
return 0;
}
static int rx51_set_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
if (rx51_spk_func == ucontrol->value.enumerated.item[0])
return 0;
rx51_spk_func = ucontrol->value.enumerated.item[0];
rx51_ext_control(&card->dapm);
return 1;
}
static int rx51_spk_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
struct snd_soc_dapm_context *dapm = w->dapm;
struct snd_soc_card *card = dapm->card;
struct rx51_audio_pdata *pdata = snd_soc_card_get_drvdata(card);
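	/* Drive the speaker amplifier enable GPIO to follow the widget's power state */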
gpiod_set_raw_value_cansleep(pdata->speaker_amp_gpio,
!!SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
static int rx51_get_input(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.enumerated.item[0] = rx51_dmic_func;
return 0;
}
static int rx51_set_input(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
if (rx51_dmic_func == ucontrol->value.enumerated.item[0])
return 0;
rx51_dmic_func = ucontrol->value.enumerated.item[0];
rx51_ext_control(&card->dapm);
return 1;
}
static int rx51_get_jack(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.enumerated.item[0] = rx51_jack_func;
return 0;
}
static int rx51_set_jack(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
if (rx51_jack_func == ucontrol->value.enumerated.item[0])
return 0;
rx51_jack_func = ucontrol->value.enumerated.item[0];
rx51_ext_control(&card->dapm);
return 1;
}
static struct snd_soc_jack rx51_av_jack;
static struct snd_soc_jack_gpio rx51_av_jack_gpios[] = {
{
.name = "avdet-gpio",
.report = SND_JACK_HEADSET,
.invert = 1,
.debounce_time = 200,
},
};
static const struct snd_soc_dapm_widget aic34_dapm_widgets[] = {
SND_SOC_DAPM_SPK("Ext Spk", rx51_spk_event),
SND_SOC_DAPM_MIC("DMic", NULL),
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_MIC("HS Mic", NULL),
SND_SOC_DAPM_LINE("FM Transmitter", NULL),
SND_SOC_DAPM_SPK("Earphone", NULL),
};
static const struct snd_soc_dapm_route audio_map[] = {
{"Ext Spk", NULL, "HPLOUT"},
{"Ext Spk", NULL, "HPROUT"},
{"Ext Spk", NULL, "HPLCOM"},
{"Ext Spk", NULL, "HPRCOM"},
{"FM Transmitter", NULL, "LLOUT"},
{"FM Transmitter", NULL, "RLOUT"},
{"Headphone Jack", NULL, "TPA6130A2 HPLEFT"},
{"Headphone Jack", NULL, "TPA6130A2 HPRIGHT"},
{"TPA6130A2 LEFTIN", NULL, "LLOUT"},
{"TPA6130A2 RIGHTIN", NULL, "RLOUT"},
{"DMic Rate 64", NULL, "DMic"},
{"DMic", NULL, "Mic Bias"},
{"b LINE2R", NULL, "MONO_LOUT"},
{"Earphone", NULL, "b HPLOUT"},
{"LINE1L", NULL, "HS Mic"},
{"HS Mic", NULL, "b Mic Bias"},
};
static const char * const spk_function[] = {"Off", "On"};
static const char * const input_function[] = {"ADC", "Digital Mic"};
static const char * const jack_function[] = {
"Off", "TV-OUT", "Headphone", "Headset"
};
static const struct soc_enum rx51_enum[] = {
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(spk_function), spk_function),
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(input_function), input_function),
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(jack_function), jack_function),
};
static const struct snd_kcontrol_new aic34_rx51_controls[] = {
SOC_ENUM_EXT("Speaker Function", rx51_enum[0],
rx51_get_spk, rx51_set_spk),
SOC_ENUM_EXT("Input Select", rx51_enum[1],
rx51_get_input, rx51_set_input),
SOC_ENUM_EXT("Jack Function", rx51_enum[2],
rx51_get_jack, rx51_set_jack),
SOC_DAPM_PIN_SWITCH("FM Transmitter"),
SOC_DAPM_PIN_SWITCH("Earphone"),
};
static int rx51_aic34_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
struct rx51_audio_pdata *pdata = snd_soc_card_get_drvdata(card);
int err;
snd_soc_limit_volume(card, "TPA6130A2 Headphone Playback Volume", 42);
err = omap_mcbsp_st_add_controls(rtd, 2);
if (err < 0) {
dev_err(card->dev, "Failed to add MCBSP controls\n");
return err;
}
/* AV jack detection */
err = snd_soc_card_jack_new(rtd->card, "AV Jack",
SND_JACK_HEADSET | SND_JACK_VIDEOOUT,
&rx51_av_jack, NULL, 0);
if (err) {
dev_err(card->dev, "Failed to add AV Jack\n");
return err;
}
/* prepare gpio for snd_soc_jack_add_gpios */
rx51_av_jack_gpios[0].gpio = desc_to_gpio(pdata->jack_detection_gpio);
devm_gpiod_put(card->dev, pdata->jack_detection_gpio);
err = snd_soc_jack_add_gpios(&rx51_av_jack,
ARRAY_SIZE(rx51_av_jack_gpios),
rx51_av_jack_gpios);
if (err) {
dev_err(card->dev, "Failed to add GPIOs\n");
return err;
}
return err;
}
static int rx51_card_remove(struct snd_soc_card *card)
{
snd_soc_jack_free_gpios(&rx51_av_jack, ARRAY_SIZE(rx51_av_jack_gpios),
rx51_av_jack_gpios);
return 0;
}
/* Digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link rx51_dai[] = {
{
.name = "TLV320AIC34",
.stream_name = "AIC34",
.cpu_dai_name = "omap-mcbsp.2",
.codec_dai_name = "tlv320aic3x-hifi",
.platform_name = "omap-mcbsp.2",
.codec_name = "tlv320aic3x-codec.2-0018",
.dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF |
SND_SOC_DAIFMT_CBM_CFM,
.init = rx51_aic34_init,
.ops = &rx51_ops,
},
};
static struct snd_soc_aux_dev rx51_aux_dev[] = {
{
.name = "TLV320AIC34b",
.codec_name = "tlv320aic3x-codec.2-0019",
},
{
.name = "TPA61320A2",
.codec_name = "tpa6130a2.2-0060",
},
};
static struct snd_soc_codec_conf rx51_codec_conf[] = {
{
.dev_name = "tlv320aic3x-codec.2-0019",
.name_prefix = "b",
},
{
.dev_name = "tpa6130a2.2-0060",
.name_prefix = "TPA6130A2",
},
};
/* Audio card */
static struct snd_soc_card rx51_sound_card = {
.name = "RX-51",
.owner = THIS_MODULE,
.remove = rx51_card_remove,
.dai_link = rx51_dai,
.num_links = ARRAY_SIZE(rx51_dai),
.aux_dev = rx51_aux_dev,
.num_aux_devs = ARRAY_SIZE(rx51_aux_dev),
.codec_conf = rx51_codec_conf,
.num_configs = ARRAY_SIZE(rx51_codec_conf),
.fully_routed = true,
.controls = aic34_rx51_controls,
.num_controls = ARRAY_SIZE(aic34_rx51_controls),
.dapm_widgets = aic34_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(aic34_dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
};
static int rx51_soc_probe(struct platform_device *pdev)
{
struct rx51_audio_pdata *pdata;
struct device_node *np = pdev->dev.of_node;
struct snd_soc_card *card = &rx51_sound_card;
int err;
if (!machine_is_nokia_rx51() && !of_machine_is_compatible("nokia,omap3-n900"))
return -ENODEV;
card->dev = &pdev->dev;
if (np) {
struct device_node *dai_node;
dai_node = of_parse_phandle(np, "nokia,cpu-dai", 0);
if (!dai_node) {
dev_err(&pdev->dev, "McBSP node is not provided\n");
return -EINVAL;
}
rx51_dai[0].cpu_dai_name = NULL;
rx51_dai[0].platform_name = NULL;
rx51_dai[0].cpu_of_node = dai_node;
rx51_dai[0].platform_of_node = dai_node;
dai_node = of_parse_phandle(np, "nokia,audio-codec", 0);
if (!dai_node) {
dev_err(&pdev->dev, "Codec node is not provided\n");
return -EINVAL;
}
rx51_dai[0].codec_name = NULL;
rx51_dai[0].codec_of_node = dai_node;
dai_node = of_parse_phandle(np, "nokia,audio-codec", 1);
if (!dai_node) {
dev_err(&pdev->dev, "Auxiliary Codec node is not provided\n");
return -EINVAL;
}
rx51_aux_dev[0].codec_name = NULL;
rx51_aux_dev[0].codec_of_node = dai_node;
rx51_codec_conf[0].dev_name = NULL;
rx51_codec_conf[0].of_node = dai_node;
dai_node = of_parse_phandle(np, "nokia,headphone-amplifier", 0);
if (!dai_node) {
dev_err(&pdev->dev, "Headphone amplifier node is not provided\n");
return -EINVAL;
}
rx51_aux_dev[1].codec_name = NULL;
rx51_aux_dev[1].codec_of_node = dai_node;
rx51_codec_conf[1].dev_name = NULL;
rx51_codec_conf[1].of_node = dai_node;
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (pdata == NULL) {
dev_err(card->dev, "failed to create private data\n");
return -ENOMEM;
}
snd_soc_card_set_drvdata(card, pdata);
pdata->tvout_selection_gpio = devm_gpiod_get(card->dev,
"tvout-selection",
GPIOD_OUT_LOW);
if (IS_ERR(pdata->tvout_selection_gpio)) {
dev_err(card->dev, "could not get tvout selection gpio\n");
return PTR_ERR(pdata->tvout_selection_gpio);
}
pdata->jack_detection_gpio = devm_gpiod_get(card->dev,
"jack-detection",
GPIOD_ASIS);
if (IS_ERR(pdata->jack_detection_gpio)) {
dev_err(card->dev, "could not get jack detection gpio\n");
return PTR_ERR(pdata->jack_detection_gpio);
}
pdata->eci_sw_gpio = devm_gpiod_get(card->dev, "eci-switch",
GPIOD_OUT_HIGH);
if (IS_ERR(pdata->eci_sw_gpio)) {
dev_err(card->dev, "could not get eci switch gpio\n");
return PTR_ERR(pdata->eci_sw_gpio);
}
pdata->speaker_amp_gpio = devm_gpiod_get(card->dev,
"speaker-amplifier",
GPIOD_OUT_LOW);
if (IS_ERR(pdata->speaker_amp_gpio)) {
dev_err(card->dev, "could not get speaker enable gpio\n");
return PTR_ERR(pdata->speaker_amp_gpio);
}
err = devm_snd_soc_register_card(card->dev, card);
if (err) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", err);
return err;
}
return 0;
}
#if defined(CONFIG_OF)
static const struct of_device_id rx51_audio_of_match[] = {
{ .compatible = "nokia,n900-audio", },
{},
};
MODULE_DEVICE_TABLE(of, rx51_audio_of_match);
#endif
static struct platform_driver rx51_soc_driver = {
.driver = {
.name = "rx51-audio",
.of_match_table = of_match_ptr(rx51_audio_of_match),
},
.probe = rx51_soc_probe,
};
module_platform_driver(rx51_soc_driver);
MODULE_AUTHOR("Nokia Corporation");
MODULE_DESCRIPTION("ALSA SoC Nokia RX-51");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rx51-audio");
| null | null | null | null | 78,933 |
39,664 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 204,659 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/*
* kernel/lockdep_proc.c
*
* Runtime locking correctness validator
*
* Started by Ingo Molnar:
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* Code for /proc/lockdep and /proc/lockdep_stats:
*
*/
#include <linux/export.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
#include <asm/div64.h>
#include "lockdep_internals.h"
static void *l_next(struct seq_file *m, void *v, loff_t *pos)
{
return seq_list_next(v, &all_lock_classes, pos);
}
static void *l_start(struct seq_file *m, loff_t *pos)
{
return seq_list_start_head(&all_lock_classes, *pos);
}
static void l_stop(struct seq_file *m, void *v)
{
}
static void print_name(struct seq_file *m, struct lock_class *class)
{
char str[KSYM_NAME_LEN];
const char *name = class->name;
if (!name) {
name = __get_key_name(class->key, str);
seq_printf(m, "%s", name);
	} else {
seq_printf(m, "%s", name);
if (class->name_version > 1)
seq_printf(m, "#%d", class->name_version);
if (class->subclass)
seq_printf(m, "/%d", class->subclass);
}
}
static int l_show(struct seq_file *m, void *v)
{
struct lock_class *class = list_entry(v, struct lock_class, lock_entry);
struct lock_list *entry;
char usage[LOCK_USAGE_CHARS];
if (v == &all_lock_classes) {
seq_printf(m, "all lock classes:\n");
return 0;
}
seq_printf(m, "%p", class->key);
#ifdef CONFIG_DEBUG_LOCKDEP
seq_printf(m, " OPS:%8ld", class->ops);
#endif
#ifdef CONFIG_PROVE_LOCKING
seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
#endif
get_usage_chars(class, usage);
seq_printf(m, " %s", usage);
seq_printf(m, ": ");
print_name(m, class);
seq_puts(m, "\n");
list_for_each_entry(entry, &class->locks_after, entry) {
if (entry->distance == 1) {
seq_printf(m, " -> [%p] ", entry->class->key);
print_name(m, entry->class);
seq_puts(m, "\n");
}
}
seq_puts(m, "\n");
return 0;
}
static const struct seq_operations lockdep_ops = {
.start = l_start,
.next = l_next,
.stop = l_stop,
.show = l_show,
};
static int lockdep_open(struct inode *inode, struct file *file)
{
return seq_open(file, &lockdep_ops);
}
static const struct file_operations proc_lockdep_operations = {
.open = lockdep_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#ifdef CONFIG_PROVE_LOCKING
static void *lc_start(struct seq_file *m, loff_t *pos)
{
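	/* *pos == 0 is the header (SEQ_START_TOKEN); chain N is at *pos == N + 1 */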
if (*pos == 0)
return SEQ_START_TOKEN;
if (*pos - 1 < nr_lock_chains)
return lock_chains + (*pos - 1);
return NULL;
}
static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return lc_start(m, pos);
}
static void lc_stop(struct seq_file *m, void *v)
{
}
static int lc_show(struct seq_file *m, void *v)
{
struct lock_chain *chain = v;
struct lock_class *class;
int i;
if (v == SEQ_START_TOKEN) {
if (nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)
seq_printf(m, "(buggered) ");
seq_printf(m, "all lock chains:\n");
return 0;
}
seq_printf(m, "irq_context: %d\n", chain->irq_context);
for (i = 0; i < chain->depth; i++) {
class = lock_chain_get_class(chain, i);
if (!class->key)
continue;
seq_printf(m, "[%p] ", class->key);
print_name(m, class);
seq_puts(m, "\n");
}
seq_puts(m, "\n");
return 0;
}
static const struct seq_operations lockdep_chains_ops = {
.start = lc_start,
.next = lc_next,
.stop = lc_stop,
.show = lc_show,
};
static int lockdep_chains_open(struct inode *inode, struct file *file)
{
return seq_open(file, &lockdep_chains_ops);
}
static const struct file_operations proc_lockdep_chains_operations = {
.open = lockdep_chains_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* CONFIG_PROVE_LOCKING */
static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
unsigned long long hi1 = debug_atomic_read(hardirqs_on_events),
hi2 = debug_atomic_read(hardirqs_off_events),
hr1 = debug_atomic_read(redundant_hardirqs_on),
hr2 = debug_atomic_read(redundant_hardirqs_off),
si1 = debug_atomic_read(softirqs_on_events),
si2 = debug_atomic_read(softirqs_off_events),
sr1 = debug_atomic_read(redundant_softirqs_on),
sr2 = debug_atomic_read(redundant_softirqs_off);
seq_printf(m, " chain lookup misses: %11llu\n",
debug_atomic_read(chain_lookup_misses));
seq_printf(m, " chain lookup hits: %11llu\n",
debug_atomic_read(chain_lookup_hits));
seq_printf(m, " cyclic checks: %11llu\n",
debug_atomic_read(nr_cyclic_checks));
seq_printf(m, " find-mask forwards checks: %11llu\n",
debug_atomic_read(nr_find_usage_forwards_checks));
seq_printf(m, " find-mask backwards checks: %11llu\n",
debug_atomic_read(nr_find_usage_backwards_checks));
seq_printf(m, " hardirq on events: %11llu\n", hi1);
seq_printf(m, " hardirq off events: %11llu\n", hi2);
seq_printf(m, " redundant hardirq ons: %11llu\n", hr1);
seq_printf(m, " redundant hardirq offs: %11llu\n", hr2);
seq_printf(m, " softirq on events: %11llu\n", si1);
seq_printf(m, " softirq off events: %11llu\n", si2);
seq_printf(m, " redundant softirq ons: %11llu\n", sr1);
seq_printf(m, " redundant softirq offs: %11llu\n", sr2);
#endif
}
static int lockdep_stats_show(struct seq_file *m, void *v)
{
struct lock_class *class;
unsigned long nr_unused = 0, nr_uncategorized = 0,
nr_irq_safe = 0, nr_irq_unsafe = 0,
nr_softirq_safe = 0, nr_softirq_unsafe = 0,
nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
sum_forward_deps = 0;
list_for_each_entry(class, &all_lock_classes, lock_entry) {
if (class->usage_mask == 0)
nr_unused++;
if (class->usage_mask == LOCKF_USED)
nr_uncategorized++;
if (class->usage_mask & LOCKF_USED_IN_IRQ)
nr_irq_safe++;
if (class->usage_mask & LOCKF_ENABLED_IRQ)
nr_irq_unsafe++;
if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
nr_softirq_safe++;
if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
nr_softirq_unsafe++;
if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
nr_hardirq_safe++;
if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
nr_hardirq_unsafe++;
if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
nr_irq_read_safe++;
if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
nr_irq_read_unsafe++;
if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
nr_softirq_read_safe++;
if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
nr_softirq_read_unsafe++;
if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
nr_hardirq_read_safe++;
if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
nr_hardirq_read_unsafe++;
#ifdef CONFIG_PROVE_LOCKING
sum_forward_deps += lockdep_count_forward_deps(class);
#endif
}
#ifdef CONFIG_DEBUG_LOCKDEP
DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
#endif
seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
nr_lock_classes, MAX_LOCKDEP_KEYS);
seq_printf(m, " direct dependencies: %11lu [max: %lu]\n",
nr_list_entries, MAX_LOCKDEP_ENTRIES);
seq_printf(m, " indirect dependencies: %11lu\n",
sum_forward_deps);
/*
* Total number of dependencies:
*
* All irq-safe locks may nest inside irq-unsafe locks,
* plus all the other known dependencies:
*/
seq_printf(m, " all direct dependencies: %11lu\n",
nr_irq_unsafe * nr_irq_safe +
nr_hardirq_unsafe * nr_hardirq_safe +
nr_list_entries);
#ifdef CONFIG_PROVE_LOCKING
seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
nr_lock_chains, MAX_LOCKDEP_CHAINS);
seq_printf(m, " dependency chain hlocks: %11d [max: %lu]\n",
nr_chain_hlocks, MAX_LOCKDEP_CHAIN_HLOCKS);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
seq_printf(m, " in-hardirq chains: %11u\n",
nr_hardirq_chains);
seq_printf(m, " in-softirq chains: %11u\n",
nr_softirq_chains);
#endif
seq_printf(m, " in-process chains: %11u\n",
nr_process_chains);
seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
seq_printf(m, " combined max dependencies: %11u\n",
(nr_hardirq_chains + 1) *
(nr_softirq_chains + 1) *
(nr_process_chains + 1)
);
seq_printf(m, " hardirq-safe locks: %11lu\n",
nr_hardirq_safe);
seq_printf(m, " hardirq-unsafe locks: %11lu\n",
nr_hardirq_unsafe);
seq_printf(m, " softirq-safe locks: %11lu\n",
nr_softirq_safe);
seq_printf(m, " softirq-unsafe locks: %11lu\n",
nr_softirq_unsafe);
seq_printf(m, " irq-safe locks: %11lu\n",
nr_irq_safe);
seq_printf(m, " irq-unsafe locks: %11lu\n",
nr_irq_unsafe);
seq_printf(m, " hardirq-read-safe locks: %11lu\n",
nr_hardirq_read_safe);
seq_printf(m, " hardirq-read-unsafe locks: %11lu\n",
nr_hardirq_read_unsafe);
seq_printf(m, " softirq-read-safe locks: %11lu\n",
nr_softirq_read_safe);
seq_printf(m, " softirq-read-unsafe locks: %11lu\n",
nr_softirq_read_unsafe);
seq_printf(m, " irq-read-safe locks: %11lu\n",
nr_irq_read_safe);
seq_printf(m, " irq-read-unsafe locks: %11lu\n",
nr_irq_read_unsafe);
seq_printf(m, " uncategorized locks: %11lu\n",
nr_uncategorized);
seq_printf(m, " unused locks: %11lu\n",
nr_unused);
seq_printf(m, " max locking depth: %11u\n",
max_lockdep_depth);
#ifdef CONFIG_PROVE_LOCKING
seq_printf(m, " max bfs queue depth: %11u\n",
max_bfs_queue_depth);
#endif
lockdep_stats_debug_show(m);
seq_printf(m, " debug_locks: %11u\n",
debug_locks);
return 0;
}
static int lockdep_stats_open(struct inode *inode, struct file *file)
{
return single_open(file, lockdep_stats_show, NULL);
}
static const struct file_operations proc_lockdep_stats_operations = {
.open = lockdep_stats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#ifdef CONFIG_LOCK_STAT
struct lock_stat_data {
struct lock_class *class;
struct lock_class_stats stats;
};
struct lock_stat_seq {
struct lock_stat_data *iter_end;
struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
};
/*
* sort on absolute number of contentions
*/
static int lock_stat_cmp(const void *l, const void *r)
{
const struct lock_stat_data *dl = l, *dr = r;
unsigned long nl, nr;
nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;
return nr - nl;
}
static void seq_line(struct seq_file *m, char c, int offset, int length)
{
int i;
for (i = 0; i < offset; i++)
seq_puts(m, " ");
for (i = 0; i < length; i++)
seq_printf(m, "%c", c);
seq_puts(m, "\n");
}
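/*
 * Format 'nr' scaled down by a factor of 1000 as a fixed-point "x.yy"
 * string, rounded to two decimal places.
 */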
static void snprint_time(char *buf, size_t bufsiz, s64 nr)
{
s64 div;
s32 rem;
nr += 5; /* for display rounding */
div = div_s64_rem(nr, 1000, &rem);
snprintf(buf, bufsiz, "%lld.%02d", (long long)div, (int)rem/10);
}
static void seq_time(struct seq_file *m, s64 time)
{
char num[15];
snprint_time(num, sizeof(num), time);
seq_printf(m, " %14s", num);
}
static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
{
seq_printf(m, "%14lu", lt->nr);
seq_time(m, lt->min);
seq_time(m, lt->max);
seq_time(m, lt->total);
seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0);
}
static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
{
struct lockdep_subclass_key *ckey;
struct lock_class_stats *stats;
struct lock_class *class;
const char *cname;
int i, namelen;
char name[39];
class = data->class;
stats = &data->stats;
namelen = 38;
if (class->name_version > 1)
namelen -= 2; /* XXX truncates versions > 9 */
if (class->subclass)
namelen -= 2;
rcu_read_lock_sched();
cname = rcu_dereference_sched(class->name);
ckey = rcu_dereference_sched(class->key);
if (!cname && !ckey) {
rcu_read_unlock_sched();
return;
} else if (!cname) {
char str[KSYM_NAME_LEN];
const char *key_name;
key_name = __get_key_name(ckey, str);
snprintf(name, namelen, "%s", key_name);
} else {
snprintf(name, namelen, "%s", cname);
}
rcu_read_unlock_sched();
namelen = strlen(name);
if (class->name_version > 1) {
snprintf(name+namelen, 3, "#%d", class->name_version);
namelen += 2;
}
if (class->subclass) {
snprintf(name+namelen, 3, "/%d", class->subclass);
namelen += 2;
}
if (stats->write_holdtime.nr) {
if (stats->read_holdtime.nr)
seq_printf(m, "%38s-W:", name);
else
seq_printf(m, "%40s:", name);
seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);
seq_lock_time(m, &stats->write_waittime);
seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_write]);
seq_lock_time(m, &stats->write_holdtime);
seq_puts(m, "\n");
}
if (stats->read_holdtime.nr) {
seq_printf(m, "%38s-R:", name);
seq_printf(m, "%14lu ", stats->bounces[bounce_contended_read]);
seq_lock_time(m, &stats->read_waittime);
seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_read]);
seq_lock_time(m, &stats->read_holdtime);
seq_puts(m, "\n");
}
if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
return;
if (stats->read_holdtime.nr)
namelen += 2;
for (i = 0; i < LOCKSTAT_POINTS; i++) {
char ip[32];
if (class->contention_point[i] == 0)
break;
if (!i)
seq_line(m, '-', 40-namelen, namelen);
snprintf(ip, sizeof(ip), "[<%p>]",
(void *)class->contention_point[i]);
seq_printf(m, "%40s %14lu %29s %pS\n",
name, stats->contention_point[i],
ip, (void *)class->contention_point[i]);
}
for (i = 0; i < LOCKSTAT_POINTS; i++) {
char ip[32];
if (class->contending_point[i] == 0)
break;
if (!i)
seq_line(m, '-', 40-namelen, namelen);
snprintf(ip, sizeof(ip), "[<%p>]",
(void *)class->contending_point[i]);
seq_printf(m, "%40s %14lu %29s %pS\n",
name, stats->contending_point[i],
ip, (void *)class->contending_point[i]);
}
if (i) {
seq_puts(m, "\n");
seq_line(m, '.', 0, 40 + 1 + 12 * (14 + 1));
seq_puts(m, "\n");
}
}
static void seq_header(struct seq_file *m)
{
seq_puts(m, "lock_stat version 0.4\n");
if (unlikely(!debug_locks))
seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s %14s %14s "
"%14s %14s\n",
"class name",
"con-bounces",
"contentions",
"waittime-min",
"waittime-max",
"waittime-total",
"waittime-avg",
"acq-bounces",
"acquisitions",
"holdtime-min",
"holdtime-max",
"holdtime-total",
"holdtime-avg");
seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
seq_printf(m, "\n");
}
static void *ls_start(struct seq_file *m, loff_t *pos)
{
struct lock_stat_seq *data = m->private;
struct lock_stat_data *iter;
if (*pos == 0)
return SEQ_START_TOKEN;
iter = data->stats + (*pos - 1);
if (iter >= data->iter_end)
iter = NULL;
return iter;
}
static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return ls_start(m, pos);
}
static void ls_stop(struct seq_file *m, void *v)
{
}
static int ls_show(struct seq_file *m, void *v)
{
if (v == SEQ_START_TOKEN)
seq_header(m);
else
seq_stats(m, v);
return 0;
}
static const struct seq_operations lockstat_ops = {
.start = ls_start,
.next = ls_next,
.stop = ls_stop,
.show = ls_show,
};
static int lock_stat_open(struct inode *inode, struct file *file)
{
int res;
struct lock_class *class;
struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));
if (!data)
return -ENOMEM;
res = seq_open(file, &lockstat_ops);
if (!res) {
struct lock_stat_data *iter = data->stats;
struct seq_file *m = file->private_data;
list_for_each_entry(class, &all_lock_classes, lock_entry) {
iter->class = class;
iter->stats = lock_stats(class);
iter++;
}
data->iter_end = iter;
sort(data->stats, data->iter_end - data->stats,
sizeof(struct lock_stat_data),
lock_stat_cmp, NULL);
m->private = data;
} else
vfree(data);
return res;
}
static ssize_t lock_stat_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct lock_class *class;
char c;
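/* writing '0' to /proc/lock_stat clears the statistics of every lock class */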
if (count) {
if (get_user(c, buf))
return -EFAULT;
if (c != '0')
return count;
list_for_each_entry(class, &all_lock_classes, lock_entry)
clear_lock_stats(class);
}
return count;
}
static int lock_stat_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
vfree(seq->private);
return seq_release(inode, file);
}
static const struct file_operations proc_lock_stat_operations = {
.open = lock_stat_open,
.write = lock_stat_write,
.read = seq_read,
.llseek = seq_lseek,
.release = lock_stat_release,
};
#endif /* CONFIG_LOCK_STAT */
static int __init lockdep_proc_init(void)
{
proc_create("lockdep", S_IRUSR, NULL, &proc_lockdep_operations);
#ifdef CONFIG_PROVE_LOCKING
proc_create("lockdep_chains", S_IRUSR, NULL,
&proc_lockdep_chains_operations);
#endif
proc_create("lockdep_stats", S_IRUSR, NULL,
&proc_lockdep_stats_operations);
#ifdef CONFIG_LOCK_STAT
proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL,
&proc_lock_stat_operations);
#endif
return 0;
}
__initcall(lockdep_proc_init);
| null | null | null | null | 113,006 |
1,851 | null |
train_val
|
1b0d3845b454eaaac0b2064c78926ca4d739a080
| 264,419 |
qemu
| 0 |
https://github.com/bonzini/qemu
|
2016-10-18 11:40:27+01:00
|
/*
* QEMU model of the Milkymist SD Card Controller.
*
* Copyright (c) 2010 Michael Walle <michael@walle.cc>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*
*
* Specification available at:
* http://milkymist.walle.cc/socdoc/memcard.pdf
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/sysbus.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include "qemu/error-report.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/sd/sd.h"
enum {
ENABLE_CMD_TX = (1<<0),
ENABLE_CMD_RX = (1<<1),
ENABLE_DAT_TX = (1<<2),
ENABLE_DAT_RX = (1<<3),
};
enum {
PENDING_CMD_TX = (1<<0),
PENDING_CMD_RX = (1<<1),
PENDING_DAT_TX = (1<<2),
PENDING_DAT_RX = (1<<3),
};
enum {
START_CMD_TX = (1<<0),
START_DAT_RX = (1<<1),
};
enum {
R_CLK2XDIV = 0,
R_ENABLE,
R_PENDING,
R_START,
R_CMD,
R_DAT,
R_MAX
};
#define TYPE_MILKYMIST_MEMCARD "milkymist-memcard"
#define MILKYMIST_MEMCARD(obj) \
OBJECT_CHECK(MilkymistMemcardState, (obj), TYPE_MILKYMIST_MEMCARD)
struct MilkymistMemcardState {
SysBusDevice parent_obj;
MemoryRegion regs_region;
SDState *card;
int command_write_ptr;
int response_read_ptr;
int response_len;
int ignore_next_cmd;
int enabled;
uint8_t command[6];
uint8_t response[17];
uint32_t regs[R_MAX];
};
typedef struct MilkymistMemcardState MilkymistMemcardState;
static void update_pending_bits(MilkymistMemcardState *s)
{
/* transmits are instantaneous, thus tx pending bits are never set */
s->regs[R_PENDING] = 0;
/* if rx is enabled the corresponding pending bits are always set */
if (s->regs[R_ENABLE] & ENABLE_CMD_RX) {
s->regs[R_PENDING] |= PENDING_CMD_RX;
}
if (s->regs[R_ENABLE] & ENABLE_DAT_RX) {
s->regs[R_PENDING] |= PENDING_DAT_RX;
}
}
static void memcard_sd_command(MilkymistMemcardState *s)
{
SDRequest req;
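/* decode the 6-byte command buffer: command index, 32-bit argument and CRC */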
req.cmd = s->command[0] & 0x3f;
req.arg = (s->command[1] << 24) | (s->command[2] << 16)
| (s->command[3] << 8) | s->command[4];
req.crc = s->command[5];
s->response[0] = req.cmd;
s->response_len = sd_do_command(s->card, &req, s->response+1);
s->response_read_ptr = 0;
if (s->response_len == 16) {
/* R2 response */
s->response[0] = 0x3f;
s->response_len += 1;
} else if (s->response_len == 4) {
/* no crc calculation, insert dummy byte */
s->response[5] = 0;
s->response_len += 2;
}
if (req.cmd == 0) {
/* next write is a dummy byte to clock the initialization of the sd
* card */
s->ignore_next_cmd = 1;
}
}
static uint64_t memcard_read(void *opaque, hwaddr addr,
unsigned size)
{
MilkymistMemcardState *s = opaque;
uint32_t r = 0;
addr >>= 2;
switch (addr) {
case R_CMD:
if (!s->enabled) {
r = 0xff;
} else {
r = s->response[s->response_read_ptr++];
if (s->response_read_ptr > s->response_len) {
error_report("milkymist_memcard: "
"read more cmd bytes than available. Clipping.");
s->response_read_ptr = 0;
}
}
break;
case R_DAT:
if (!s->enabled) {
r = 0xffffffff;
} else {
r = 0;
r |= sd_read_data(s->card) << 24;
r |= sd_read_data(s->card) << 16;
r |= sd_read_data(s->card) << 8;
r |= sd_read_data(s->card);
}
break;
case R_CLK2XDIV:
case R_ENABLE:
case R_PENDING:
case R_START:
r = s->regs[addr];
break;
default:
error_report("milkymist_memcard: read access to unknown register 0x"
TARGET_FMT_plx, addr << 2);
break;
}
trace_milkymist_memcard_memory_read(addr << 2, r);
return r;
}
static void memcard_write(void *opaque, hwaddr addr, uint64_t value,
unsigned size)
{
MilkymistMemcardState *s = opaque;
trace_milkymist_memcard_memory_write(addr, value);
addr >>= 2;
switch (addr) {
case R_PENDING:
/* clear rx pending bits */
s->regs[R_PENDING] &= ~(value & (PENDING_CMD_RX | PENDING_DAT_RX));
update_pending_bits(s);
break;
case R_CMD:
if (!s->enabled) {
break;
}
if (s->ignore_next_cmd) {
s->ignore_next_cmd = 0;
break;
}
s->command[s->command_write_ptr] = value & 0xff;
s->command_write_ptr = (s->command_write_ptr + 1) % 6;
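/* a full 6-byte command has been collected; hand it to the SD core */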
if (s->command_write_ptr == 0) {
memcard_sd_command(s);
}
break;
case R_DAT:
if (!s->enabled) {
break;
}
sd_write_data(s->card, (value >> 24) & 0xff);
sd_write_data(s->card, (value >> 16) & 0xff);
sd_write_data(s->card, (value >> 8) & 0xff);
sd_write_data(s->card, value & 0xff);
break;
case R_ENABLE:
s->regs[addr] = value;
update_pending_bits(s);
break;
case R_CLK2XDIV:
case R_START:
s->regs[addr] = value;
break;
default:
error_report("milkymist_memcard: write access to unknown register 0x"
TARGET_FMT_plx, addr << 2);
break;
}
}
static const MemoryRegionOps memcard_mmio_ops = {
.read = memcard_read,
.write = memcard_write,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
},
.endianness = DEVICE_NATIVE_ENDIAN,
};
static void milkymist_memcard_reset(DeviceState *d)
{
MilkymistMemcardState *s = MILKYMIST_MEMCARD(d);
int i;
s->command_write_ptr = 0;
s->response_read_ptr = 0;
s->response_len = 0;
for (i = 0; i < R_MAX; i++) {
s->regs[i] = 0;
}
}
static int milkymist_memcard_init(SysBusDevice *dev)
{
MilkymistMemcardState *s = MILKYMIST_MEMCARD(dev);
DriveInfo *dinfo;
BlockBackend *blk;
/* FIXME use a qdev drive property instead of drive_get_next() */
dinfo = drive_get_next(IF_SD);
blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL;
s->card = sd_init(blk, false);
if (s->card == NULL) {
return -1;
}
s->enabled = blk && blk_is_inserted(blk);
memory_region_init_io(&s->regs_region, OBJECT(s), &memcard_mmio_ops, s,
"milkymist-memcard", R_MAX * 4);
sysbus_init_mmio(dev, &s->regs_region);
return 0;
}
static const VMStateDescription vmstate_milkymist_memcard = {
.name = "milkymist-memcard",
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_INT32(command_write_ptr, MilkymistMemcardState),
VMSTATE_INT32(response_read_ptr, MilkymistMemcardState),
VMSTATE_INT32(response_len, MilkymistMemcardState),
VMSTATE_INT32(ignore_next_cmd, MilkymistMemcardState),
VMSTATE_INT32(enabled, MilkymistMemcardState),
VMSTATE_UINT8_ARRAY(command, MilkymistMemcardState, 6),
VMSTATE_UINT8_ARRAY(response, MilkymistMemcardState, 17),
VMSTATE_UINT32_ARRAY(regs, MilkymistMemcardState, R_MAX),
VMSTATE_END_OF_LIST()
}
};
static void milkymist_memcard_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
k->init = milkymist_memcard_init;
dc->reset = milkymist_memcard_reset;
dc->vmsd = &vmstate_milkymist_memcard;
/* Reason: init() method uses drive_get_next() */
dc->cannot_instantiate_with_device_add_yet = true;
}
static const TypeInfo milkymist_memcard_info = {
.name = TYPE_MILKYMIST_MEMCARD,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(MilkymistMemcardState),
.class_init = milkymist_memcard_class_init,
};
static void milkymist_memcard_register_types(void)
{
type_register_static(&milkymist_memcard_info);
}
type_init(milkymist_memcard_register_types)
| null | null | null | null | 122,543 |
30,528 | null |
train_val
|
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
| 195,523 |
linux
| 0 |
https://github.com/torvalds/linux
|
2017-05-12 08:32:58+10:00
|
/*
* Driver for Goodix Touchscreens
*
* Copyright (c) 2014 Red Hat Inc.
* Copyright (c) 2015 K. Merker <merker@debian.org>
*
* This code is based on gt9xx.c authored by andrew@goodix.com:
*
* 2010 - 2012 Goodix Technology.
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; version 2 of the License.
*/
#include <linux/kernel.h>
#include <linux/dmi.h>
#include <linux/firmware.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/of.h>
#include <asm/unaligned.h>
struct goodix_ts_data {
struct i2c_client *client;
struct input_dev *input_dev;
int abs_x_max;
int abs_y_max;
bool swapped_x_y;
bool inverted_x;
bool inverted_y;
unsigned int max_touch_num;
unsigned int int_trigger_type;
int cfg_len;
struct gpio_desc *gpiod_int;
struct gpio_desc *gpiod_rst;
u16 id;
u16 version;
const char *cfg_name;
struct completion firmware_loading_complete;
unsigned long irq_flags;
};
#define GOODIX_GPIO_INT_NAME "irq"
#define GOODIX_GPIO_RST_NAME "reset"
#define GOODIX_MAX_HEIGHT 4096
#define GOODIX_MAX_WIDTH 4096
#define GOODIX_INT_TRIGGER 1
#define GOODIX_CONTACT_SIZE 8
#define GOODIX_MAX_CONTACTS 10
#define GOODIX_CONFIG_MAX_LENGTH 240
#define GOODIX_CONFIG_911_LENGTH 186
#define GOODIX_CONFIG_967_LENGTH 228
/* Register defines */
#define GOODIX_REG_COMMAND 0x8040
#define GOODIX_CMD_SCREEN_OFF 0x05
#define GOODIX_READ_COOR_ADDR 0x814E
#define GOODIX_REG_CONFIG_DATA 0x8047
#define GOODIX_REG_ID 0x8140
#define RESOLUTION_LOC 1
#define MAX_CONTACTS_LOC 5
#define TRIGGER_LOC 6
static const unsigned long goodix_irq_flags[] = {
IRQ_TYPE_EDGE_RISING,
IRQ_TYPE_EDGE_FALLING,
IRQ_TYPE_LEVEL_LOW,
IRQ_TYPE_LEVEL_HIGH,
};
/*
* Those tablets have their coordinates origin at the bottom right
* of the tablet, as if rotated 180 degrees
*/
static const struct dmi_system_id rotated_screen[] = {
#if defined(CONFIG_DMI) && defined(CONFIG_X86)
{
.ident = "WinBook TW100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
}
},
{
.ident = "WinBook TW700",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
},
},
#endif
{}
};
/**
* goodix_i2c_read - read data from a register of the i2c slave device.
*
* @client: i2c device.
* @reg: the register to read from.
* @buf: buffer the data read from the device is stored into.
* @len: number of bytes to read
*/
static int goodix_i2c_read(struct i2c_client *client,
u16 reg, u8 *buf, int len)
{
struct i2c_msg msgs[2];
u16 wbuf = cpu_to_be16(reg);
int ret;
msgs[0].flags = 0;
msgs[0].addr = client->addr;
msgs[0].len = 2;
msgs[0].buf = (u8 *)&wbuf;
msgs[1].flags = I2C_M_RD;
msgs[1].addr = client->addr;
msgs[1].len = len;
msgs[1].buf = buf;
ret = i2c_transfer(client->adapter, msgs, 2);
return ret < 0 ? ret : (ret != ARRAY_SIZE(msgs) ? -EIO : 0);
}
/**
* goodix_i2c_write - write data to a register of the i2c slave device.
*
* @client: i2c device.
* @reg: the register to write to.
* @buf: raw data buffer to write.
* @len: length of the buffer to write
*/
static int goodix_i2c_write(struct i2c_client *client, u16 reg, const u8 *buf,
unsigned len)
{
u8 *addr_buf;
struct i2c_msg msg;
int ret;
addr_buf = kmalloc(len + 2, GFP_KERNEL);
if (!addr_buf)
return -ENOMEM;
addr_buf[0] = reg >> 8;
addr_buf[1] = reg & 0xFF;
memcpy(&addr_buf[2], buf, len);
msg.flags = 0;
msg.addr = client->addr;
msg.buf = addr_buf;
msg.len = len + 2;
ret = i2c_transfer(client->adapter, &msg, 1);
kfree(addr_buf);
return ret < 0 ? ret : (ret != 1 ? -EIO : 0);
}
static int goodix_i2c_write_u8(struct i2c_client *client, u16 reg, u8 value)
{
return goodix_i2c_write(client, reg, &value, sizeof(value));
}
static int goodix_get_cfg_len(u16 id)
{
switch (id) {
case 911:
case 9271:
case 9110:
case 927:
case 928:
return GOODIX_CONFIG_911_LENGTH;
case 912:
case 967:
return GOODIX_CONFIG_967_LENGTH;
default:
return GOODIX_CONFIG_MAX_LENGTH;
}
}
static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
{
int touch_num;
int error;
error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, data,
GOODIX_CONTACT_SIZE + 1);
if (error) {
dev_err(&ts->client->dev, "I2C transfer error: %d\n", error);
return error;
}
if (!(data[0] & 0x80))
return -EAGAIN;
touch_num = data[0] & 0x0f;
if (touch_num > ts->max_touch_num)
return -EPROTO;
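/* the first contact was fetched together with the status byte; read the
 * remaining contacts with a second transfer */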
if (touch_num > 1) {
data += 1 + GOODIX_CONTACT_SIZE;
error = goodix_i2c_read(ts->client,
GOODIX_READ_COOR_ADDR +
1 + GOODIX_CONTACT_SIZE,
data,
GOODIX_CONTACT_SIZE * (touch_num - 1));
if (error)
return error;
}
return touch_num;
}
static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
{
int id = coor_data[0] & 0x0F;
int input_x = get_unaligned_le16(&coor_data[1]);
int input_y = get_unaligned_le16(&coor_data[3]);
int input_w = get_unaligned_le16(&coor_data[5]);
/* Inversions have to happen before axis swapping */
if (ts->inverted_x)
input_x = ts->abs_x_max - input_x;
if (ts->inverted_y)
input_y = ts->abs_y_max - input_y;
if (ts->swapped_x_y)
swap(input_x, input_y);
input_mt_slot(ts->input_dev, id);
input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x);
input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, input_y);
input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, input_w);
input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
}
/**
* goodix_process_events - Process incoming events
*
* @ts: our goodix_ts_data pointer
*
* Called when the IRQ is triggered. Read the current device state, and push
* the input events to the user space.
*/
static void goodix_process_events(struct goodix_ts_data *ts)
{
u8 point_data[1 + GOODIX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
int touch_num;
int i;
touch_num = goodix_ts_read_input_report(ts, point_data);
if (touch_num < 0)
return;
for (i = 0; i < touch_num; i++)
goodix_ts_report_touch(ts,
&point_data[1 + GOODIX_CONTACT_SIZE * i]);
input_mt_sync_frame(ts->input_dev);
input_sync(ts->input_dev);
}
/**
* goodix_ts_irq_handler - The IRQ handler
*
* @irq: interrupt number.
* @dev_id: private data pointer.
*/
static irqreturn_t goodix_ts_irq_handler(int irq, void *dev_id)
{
struct goodix_ts_data *ts = dev_id;
goodix_process_events(ts);
if (goodix_i2c_write_u8(ts->client, GOODIX_READ_COOR_ADDR, 0) < 0)
dev_err(&ts->client->dev, "I2C write end_cmd error\n");
return IRQ_HANDLED;
}
static void goodix_free_irq(struct goodix_ts_data *ts)
{
devm_free_irq(&ts->client->dev, ts->client->irq, ts);
}
static int goodix_request_irq(struct goodix_ts_data *ts)
{
return devm_request_threaded_irq(&ts->client->dev, ts->client->irq,
NULL, goodix_ts_irq_handler,
ts->irq_flags, ts->client->name, ts);
}
/**
* goodix_check_cfg - Checks if config fw is valid
*
* @ts: goodix_ts_data pointer
* @cfg: firmware config data
*/
static int goodix_check_cfg(struct goodix_ts_data *ts,
const struct firmware *cfg)
{
int i, raw_cfg_len;
u8 check_sum = 0;
if (cfg->size > GOODIX_CONFIG_MAX_LENGTH) {
dev_err(&ts->client->dev,
"The length of the config fw is not correct");
return -EINVAL;
}
raw_cfg_len = cfg->size - 2;
for (i = 0; i < raw_cfg_len; i++)
check_sum += cfg->data[i];
check_sum = (~check_sum) + 1;
if (check_sum != cfg->data[raw_cfg_len]) {
dev_err(&ts->client->dev,
"The checksum of the config fw is not correct");
return -EINVAL;
}
if (cfg->data[raw_cfg_len + 1] != 1) {
dev_err(&ts->client->dev,
"Config fw must have Config_Fresh register set");
return -EINVAL;
}
return 0;
}
/**
* goodix_send_cfg - Write fw config to device
*
* @ts: goodix_ts_data pointer
* @cfg: config firmware to write to device
*/
static int goodix_send_cfg(struct goodix_ts_data *ts,
const struct firmware *cfg)
{
int error;
error = goodix_check_cfg(ts, cfg);
if (error)
return error;
error = goodix_i2c_write(ts->client, GOODIX_REG_CONFIG_DATA, cfg->data,
cfg->size);
if (error) {
dev_err(&ts->client->dev, "Failed to write config data: %d",
error);
return error;
}
dev_dbg(&ts->client->dev, "Config sent successfully.");
/* Let the firmware reconfigure itself, so sleep for 10ms */
usleep_range(10000, 11000);
return 0;
}
static int goodix_int_sync(struct goodix_ts_data *ts)
{
int error;
error = gpiod_direction_output(ts->gpiod_int, 0);
if (error)
return error;
msleep(50); /* T5: 50ms */
error = gpiod_direction_input(ts->gpiod_int);
if (error)
return error;
return 0;
}
/**
* goodix_reset - Reset device during power on
*
* @ts: goodix_ts_data pointer
*/
static int goodix_reset(struct goodix_ts_data *ts)
{
int error;
/* begin select I2C slave addr */
error = gpiod_direction_output(ts->gpiod_rst, 0);
if (error)
return error;
msleep(20); /* T2: > 10ms */
/* HIGH: 0x28/0x29, LOW: 0xBA/0xBB */
error = gpiod_direction_output(ts->gpiod_int, ts->client->addr == 0x14);
if (error)
return error;
usleep_range(100, 2000); /* T3: > 100us */
error = gpiod_direction_output(ts->gpiod_rst, 1);
if (error)
return error;
usleep_range(6000, 10000); /* T4: > 5ms */
/* end select I2C slave addr */
error = gpiod_direction_input(ts->gpiod_rst);
if (error)
return error;
error = goodix_int_sync(ts);
if (error)
return error;
return 0;
}
/**
* goodix_get_gpio_config - Get GPIO config from ACPI/DT
*
* @ts: goodix_ts_data pointer
*/
static int goodix_get_gpio_config(struct goodix_ts_data *ts)
{
int error;
struct device *dev;
struct gpio_desc *gpiod;
if (!ts->client)
return -EINVAL;
dev = &ts->client->dev;
/* Get the interrupt GPIO pin number */
gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_INT_NAME, GPIOD_IN);
if (IS_ERR(gpiod)) {
error = PTR_ERR(gpiod);
if (error != -EPROBE_DEFER)
dev_dbg(dev, "Failed to get %s GPIO: %d\n",
GOODIX_GPIO_INT_NAME, error);
return error;
}
ts->gpiod_int = gpiod;
/* Get the reset line GPIO pin number */
gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, GPIOD_IN);
if (IS_ERR(gpiod)) {
error = PTR_ERR(gpiod);
if (error != -EPROBE_DEFER)
dev_dbg(dev, "Failed to get %s GPIO: %d\n",
GOODIX_GPIO_RST_NAME, error);
return error;
}
ts->gpiod_rst = gpiod;
return 0;
}
/**
* goodix_read_config - Read the embedded configuration of the panel
*
* @ts: our goodix_ts_data pointer
*
* Must be called during probe
*/
static void goodix_read_config(struct goodix_ts_data *ts)
{
u8 config[GOODIX_CONFIG_MAX_LENGTH];
int error;
error = goodix_i2c_read(ts->client, GOODIX_REG_CONFIG_DATA,
config, ts->cfg_len);
if (error) {
dev_warn(&ts->client->dev,
"Error reading config (%d), using defaults\n",
error);
ts->abs_x_max = GOODIX_MAX_WIDTH;
ts->abs_y_max = GOODIX_MAX_HEIGHT;
if (ts->swapped_x_y)
swap(ts->abs_x_max, ts->abs_y_max);
ts->int_trigger_type = GOODIX_INT_TRIGGER;
ts->max_touch_num = GOODIX_MAX_CONTACTS;
return;
}
ts->abs_x_max = get_unaligned_le16(&config[RESOLUTION_LOC]);
ts->abs_y_max = get_unaligned_le16(&config[RESOLUTION_LOC + 2]);
if (ts->swapped_x_y)
swap(ts->abs_x_max, ts->abs_y_max);
ts->int_trigger_type = config[TRIGGER_LOC] & 0x03;
ts->max_touch_num = config[MAX_CONTACTS_LOC] & 0x0f;
if (!ts->abs_x_max || !ts->abs_y_max || !ts->max_touch_num) {
dev_err(&ts->client->dev,
"Invalid config, using defaults\n");
ts->abs_x_max = GOODIX_MAX_WIDTH;
ts->abs_y_max = GOODIX_MAX_HEIGHT;
if (ts->swapped_x_y)
swap(ts->abs_x_max, ts->abs_y_max);
ts->max_touch_num = GOODIX_MAX_CONTACTS;
}
if (dmi_check_system(rotated_screen)) {
ts->inverted_x = true;
ts->inverted_y = true;
dev_dbg(&ts->client->dev,
"Applying '180 degrees rotated screen' quirk\n");
}
}
/**
* goodix_read_version - Read goodix touchscreen version
*
* @ts: our goodix_ts_data pointer
*/
static int goodix_read_version(struct goodix_ts_data *ts)
{
int error;
u8 buf[6];
char id_str[5];
error = goodix_i2c_read(ts->client, GOODIX_REG_ID, buf, sizeof(buf));
if (error) {
dev_err(&ts->client->dev, "read version failed: %d\n", error);
return error;
}
memcpy(id_str, buf, 4);
id_str[4] = 0;
if (kstrtou16(id_str, 10, &ts->id))
ts->id = 0x1001;
ts->version = get_unaligned_le16(&buf[4]);
dev_info(&ts->client->dev, "ID %d, version: %04x\n", ts->id,
ts->version);
return 0;
}
/**
* goodix_i2c_test - I2C test function to check if the device answers.
*
* @client: the i2c client
*/
static int goodix_i2c_test(struct i2c_client *client)
{
int retry = 0;
int error;
u8 test;
while (retry++ < 2) {
error = goodix_i2c_read(client, GOODIX_REG_CONFIG_DATA,
&test, 1);
if (!error)
return 0;
dev_err(&client->dev, "i2c test failed attempt %d: %d\n",
retry, error);
msleep(20);
}
return error;
}
/**
* goodix_request_input_dev - Allocate, populate and register the input device
*
* @ts: our goodix_ts_data pointer
*
* Must be called during probe
*/
static int goodix_request_input_dev(struct goodix_ts_data *ts)
{
int error;
ts->input_dev = devm_input_allocate_device(&ts->client->dev);
if (!ts->input_dev) {
dev_err(&ts->client->dev, "Failed to allocate input device.");
return -ENOMEM;
}
input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X,
0, ts->abs_x_max, 0, 0);
input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y,
0, ts->abs_y_max, 0, 0);
input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0);
input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
input_mt_init_slots(ts->input_dev, ts->max_touch_num,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
ts->input_dev->name = "Goodix Capacitive TouchScreen";
ts->input_dev->phys = "input/ts";
ts->input_dev->id.bustype = BUS_I2C;
ts->input_dev->id.vendor = 0x0416;
ts->input_dev->id.product = ts->id;
ts->input_dev->id.version = ts->version;
error = input_register_device(ts->input_dev);
if (error) {
dev_err(&ts->client->dev,
"Failed to register input device: %d", error);
return error;
}
return 0;
}
/**
* goodix_configure_dev - Finish device initialization
*
* @ts: our goodix_ts_data pointer
*
* Must be called from probe to finish initialization of the device.
* Contains the common initialization code for both devices that
* declare gpio pins and devices that do not. It is either called
* directly from probe or from request_firmware_wait callback.
*/
static int goodix_configure_dev(struct goodix_ts_data *ts)
{
int error;
ts->swapped_x_y = device_property_read_bool(&ts->client->dev,
"touchscreen-swapped-x-y");
ts->inverted_x = device_property_read_bool(&ts->client->dev,
"touchscreen-inverted-x");
ts->inverted_y = device_property_read_bool(&ts->client->dev,
"touchscreen-inverted-y");
goodix_read_config(ts);
error = goodix_request_input_dev(ts);
if (error)
return error;
ts->irq_flags = goodix_irq_flags[ts->int_trigger_type] | IRQF_ONESHOT;
error = goodix_request_irq(ts);
if (error) {
dev_err(&ts->client->dev, "request IRQ failed: %d\n", error);
return error;
}
return 0;
}
/**
* goodix_config_cb - Callback to finish device init
*
* @cfg: firmware config data, may be NULL if loading failed
* @ctx: our goodix_ts_data pointer
*
* request_firmware_wait callback that finishes
* initialization of the device.
*/
static void goodix_config_cb(const struct firmware *cfg, void *ctx)
{
struct goodix_ts_data *ts = ctx;
int error;
if (cfg) {
/* send device configuration to the firmware */
error = goodix_send_cfg(ts, cfg);
if (error)
goto err_release_cfg;
}
goodix_configure_dev(ts);
err_release_cfg:
release_firmware(cfg);
complete_all(&ts->firmware_loading_complete);
}
static int goodix_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct goodix_ts_data *ts;
int error;
dev_dbg(&client->dev, "I2C Address: 0x%02x\n", client->addr);
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "I2C check functionality failed.\n");
return -ENXIO;
}
ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
if (!ts)
return -ENOMEM;
ts->client = client;
i2c_set_clientdata(client, ts);
init_completion(&ts->firmware_loading_complete);
error = goodix_get_gpio_config(ts);
if (error)
return error;
if (ts->gpiod_int && ts->gpiod_rst) {
/* reset the controller */
error = goodix_reset(ts);
if (error) {
dev_err(&client->dev, "Controller reset failed.\n");
return error;
}
}
error = goodix_i2c_test(client);
if (error) {
dev_err(&client->dev, "I2C communication failure: %d\n", error);
return error;
}
error = goodix_read_version(ts);
if (error) {
dev_err(&client->dev, "Read version failed.\n");
return error;
}
ts->cfg_len = goodix_get_cfg_len(ts->id);
if (ts->gpiod_int && ts->gpiod_rst) {
/* update device config */
ts->cfg_name = devm_kasprintf(&client->dev, GFP_KERNEL,
"goodix_%d_cfg.bin", ts->id);
if (!ts->cfg_name)
return -ENOMEM;
error = request_firmware_nowait(THIS_MODULE, true, ts->cfg_name,
&client->dev, GFP_KERNEL, ts,
goodix_config_cb);
if (error) {
dev_err(&client->dev,
"Failed to invoke firmware loader: %d\n",
error);
return error;
}
return 0;
} else {
error = goodix_configure_dev(ts);
if (error)
return error;
}
return 0;
}
static int goodix_ts_remove(struct i2c_client *client)
{
struct goodix_ts_data *ts = i2c_get_clientdata(client);
if (ts->gpiod_int && ts->gpiod_rst)
wait_for_completion(&ts->firmware_loading_complete);
return 0;
}
static int __maybe_unused goodix_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct goodix_ts_data *ts = i2c_get_clientdata(client);
int error;
/* We need gpio pins to suspend/resume */
if (!ts->gpiod_int || !ts->gpiod_rst)
return 0;
wait_for_completion(&ts->firmware_loading_complete);
/* Free IRQ as IRQ pin is used as output in the suspend sequence */
goodix_free_irq(ts);
/* Output LOW on the INT pin for 5 ms */
error = gpiod_direction_output(ts->gpiod_int, 0);
if (error) {
goodix_request_irq(ts);
return error;
}
usleep_range(5000, 6000);
error = goodix_i2c_write_u8(ts->client, GOODIX_REG_COMMAND,
GOODIX_CMD_SCREEN_OFF);
if (error) {
dev_err(&ts->client->dev, "Screen off command failed\n");
gpiod_direction_input(ts->gpiod_int);
goodix_request_irq(ts);
return -EAGAIN;
}
/*
* The datasheet specifies that the interval between sending screen-off
* command and wake-up should be longer than 58 ms. To avoid waking up
* sooner, delay 58ms here.
*/
msleep(58);
return 0;
}
static int __maybe_unused goodix_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct goodix_ts_data *ts = i2c_get_clientdata(client);
int error;
if (!ts->gpiod_int || !ts->gpiod_rst)
return 0;
/*
* Exit sleep mode by outputting HIGH level to INT pin
* for 2ms~5ms.
*/
error = gpiod_direction_output(ts->gpiod_int, 1);
if (error)
return error;
usleep_range(2000, 5000);
error = goodix_int_sync(ts);
if (error)
return error;
error = goodix_request_irq(ts);
if (error)
return error;
return 0;
}
static SIMPLE_DEV_PM_OPS(goodix_pm_ops, goodix_suspend, goodix_resume);
static const struct i2c_device_id goodix_ts_id[] = {
{ "GDIX1001:00", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
#ifdef CONFIG_ACPI
static const struct acpi_device_id goodix_acpi_match[] = {
{ "GDIX1001", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
#endif
#ifdef CONFIG_OF
static const struct of_device_id goodix_of_match[] = {
{ .compatible = "goodix,gt911" },
{ .compatible = "goodix,gt9110" },
{ .compatible = "goodix,gt912" },
{ .compatible = "goodix,gt927" },
{ .compatible = "goodix,gt9271" },
{ .compatible = "goodix,gt928" },
{ .compatible = "goodix,gt967" },
{ }
};
MODULE_DEVICE_TABLE(of, goodix_of_match);
#endif
static struct i2c_driver goodix_ts_driver = {
.probe = goodix_ts_probe,
.remove = goodix_ts_remove,
.id_table = goodix_ts_id,
.driver = {
.name = "Goodix-TS",
.acpi_match_table = ACPI_PTR(goodix_acpi_match),
.of_match_table = of_match_ptr(goodix_of_match),
.pm = &goodix_pm_ops,
},
};
module_i2c_driver(goodix_ts_driver);
MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
MODULE_DESCRIPTION("Goodix touchscreen driver");
MODULE_LICENSE("GPL v2");
| null | null | null | null | 103,870 |
1,237 |
34,35
|
train_val
|
d0947db40187f4708c58e64cbd6013faf9eddeed
| 1,237 |
Chrome
| 1 |
https://github.com/chromium/chromium
|
2013-04-26 19:49:29+00:00
|
xmlParseAttValueComplex(xmlParserCtxtPtr ctxt, int *attlen, int normalize) {
xmlChar limit = 0;
xmlChar *buf = NULL;
xmlChar *rep = NULL;
int len = 0;
int buf_size = 0;
int c, l, in_space = 0;
xmlChar *current = NULL;
xmlEntityPtr ent;
if (NXT(0) == '"') {
ctxt->instate = XML_PARSER_ATTRIBUTE_VALUE;
limit = '"';
NEXT;
} else if (NXT(0) == '\'') {
limit = '\'';
ctxt->instate = XML_PARSER_ATTRIBUTE_VALUE;
NEXT;
} else {
xmlFatalErr(ctxt, XML_ERR_ATTRIBUTE_NOT_STARTED, NULL);
return(NULL);
}
/*
* allocate a translation buffer.
*/
buf_size = XML_PARSER_BUFFER_SIZE;
buf = (xmlChar *) xmlMallocAtomic(buf_size * sizeof(xmlChar));
if (buf == NULL) goto mem_error;
/*
* OK loop until we reach one of the ending char or a size limit.
*/
c = CUR_CHAR(l);
while ((NXT(0) != limit) && /* checked */
(IS_CHAR(c)) && (c != '<')) {
if (c == 0) break;
if (c == '&') {
in_space = 0;
if (NXT(1) == '#') {
int val = xmlParseCharRef(ctxt);
if (val == '&') {
if (ctxt->replaceEntities) {
if (len > buf_size - 10) {
growBuffer(buf, 10);
}
buf[len++] = '&';
} else {
/*
* The reparsing will be done in xmlStringGetNodeList()
* called by the attribute() function in SAX.c
*/
if (len > buf_size - 10) {
growBuffer(buf, 10);
}
buf[len++] = '&';
buf[len++] = '#';
buf[len++] = '3';
buf[len++] = '8';
buf[len++] = ';';
}
} else if (val != 0) {
if (len > buf_size - 10) {
growBuffer(buf, 10);
}
len += xmlCopyChar(0, &buf[len], val);
}
} else {
ent = xmlParseEntityRef(ctxt);
ctxt->nbentities++;
if (ent != NULL)
ctxt->nbentities += ent->owner;
if ((ent != NULL) &&
(ent->etype == XML_INTERNAL_PREDEFINED_ENTITY)) {
if (len > buf_size - 10) {
growBuffer(buf, 10);
}
if ((ctxt->replaceEntities == 0) &&
(ent->content[0] == '&')) {
buf[len++] = '&';
buf[len++] = '#';
buf[len++] = '3';
buf[len++] = '8';
buf[len++] = ';';
} else {
buf[len++] = ent->content[0];
}
} else if ((ent != NULL) &&
(ctxt->replaceEntities != 0)) {
if (ent->etype != XML_INTERNAL_PREDEFINED_ENTITY) {
rep = xmlStringDecodeEntities(ctxt, ent->content,
XML_SUBSTITUTE_REF,
0, 0, 0);
if (rep != NULL) {
current = rep;
while (*current != 0) { /* non input consuming */
if ((*current == 0xD) || (*current == 0xA) ||
(*current == 0x9)) {
buf[len++] = 0x20;
current++;
} else
buf[len++] = *current++;
if (len > buf_size - 10) {
growBuffer(buf, 10);
}
}
xmlFree(rep);
rep = NULL;
}
} else {
if (len > buf_size - 10) {
growBuffer(buf, 10);
}
if (ent->content != NULL)
buf[len++] = ent->content[0];
}
} else if (ent != NULL) {
int i = xmlStrlen(ent->name);
const xmlChar *cur = ent->name;
/*
* This may look absurd but is needed to detect
* entities problems
*/
if ((ent->etype != XML_INTERNAL_PREDEFINED_ENTITY) &&
(ent->content != NULL)) {
rep = xmlStringDecodeEntities(ctxt, ent->content,
XML_SUBSTITUTE_REF, 0, 0, 0);
if (rep != NULL) {
xmlFree(rep);
rep = NULL;
}
}
/*
* Just output the reference
*/
buf[len++] = '&';
while (len > buf_size - i - 10) {
growBuffer(buf, i + 10);
}
for (;i > 0;i--)
buf[len++] = *cur++;
buf[len++] = ';';
}
}
} else {
if ((c == 0x20) || (c == 0xD) || (c == 0xA) || (c == 0x9)) {
if ((len != 0) || (!normalize)) {
if ((!normalize) || (!in_space)) {
COPY_BUF(l,buf,len,0x20);
while (len > buf_size - 10) {
growBuffer(buf, 10);
}
}
in_space = 1;
}
} else {
in_space = 0;
COPY_BUF(l,buf,len,c);
if (len > buf_size - 10) {
growBuffer(buf, 10);
}
}
NEXTL(l);
}
GROW;
c = CUR_CHAR(l);
}
if ((in_space) && (normalize)) {
while ((len > 0) && (buf[len - 1] == 0x20)) len--;
}
buf[len] = 0;
if (RAW == '<') {
xmlFatalErr(ctxt, XML_ERR_LT_IN_ATTRIBUTE, NULL);
} else if (RAW != limit) {
if ((c != 0) && (!IS_CHAR(c))) {
xmlFatalErrMsg(ctxt, XML_ERR_INVALID_CHAR,
"invalid character in attribute value\n");
} else {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue: ' expected\n");
}
} else
NEXT;
if (attlen != NULL) *attlen = len;
return(buf);
mem_error:
xmlErrMemory(ctxt, NULL);
if (buf != NULL)
xmlFree(buf);
if (rep != NULL)
xmlFree(rep);
return(NULL);
}
|
CVE-2013-2877
|
CWE-119
|
https://github.com/chromium/chromium/commit/d0947db40187f4708c58e64cbd6013faf9eddeed
|
Low
| 1,237 |
1,010 | null |
train_val
|
04b570817b2b38e35675b17328239746212f4c3f
| 154,067 |
FFmpeg
| 0 |
https://github.com/FFmpeg/FFmpeg
|
2018-06-01 01:23:12+05:30
|
/*
* Copyright (c) 2003 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* libavformat API example.
*
* Output a media file in any supported libavformat format. The default
* codecs are used.
* @example muxing.c
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
#define STREAM_DURATION 10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
#define SCALE_FLAGS SWS_BICUBIC
// a wrapper around a single output AVStream
typedef struct OutputStream {
AVStream *st;
AVCodecContext *enc;
/* pts of the next frame that will be generated */
int64_t next_pts;
int samples_count;
AVFrame *frame;
AVFrame *tmp_frame;
float t, tincr, tincr2;
struct SwsContext *sws_ctx;
struct SwrContext *swr_ctx;
} OutputStream;
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
pkt->stream_index);
}
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
/* rescale output packet timestamp values from codec to stream timebase */
av_packet_rescale_ts(pkt, *time_base, st->time_base);
pkt->stream_index = st->index;
/* Write the compressed frame to the media file. */
log_packet(fmt_ctx, pkt);
return av_interleaved_write_frame(fmt_ctx, pkt);
}
/* Add an output stream. */
static void add_stream(OutputStream *ost, AVFormatContext *oc,
AVCodec **codec,
enum AVCodecID codec_id)
{
AVCodecContext *c;
int i;
/* find the encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
fprintf(stderr, "Could not find encoder for '%s'\n",
avcodec_get_name(codec_id));
exit(1);
}
ost->st = avformat_new_stream(oc, NULL);
if (!ost->st) {
fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
ost->st->id = oc->nb_streams-1;
c = avcodec_alloc_context3(*codec);
if (!c) {
fprintf(stderr, "Could not alloc an encoding context\n");
exit(1);
}
ost->enc = c;
switch ((*codec)->type) {
case AVMEDIA_TYPE_AUDIO:
c->sample_fmt = (*codec)->sample_fmts ?
(*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
c->bit_rate = 64000;
c->sample_rate = 44100;
if ((*codec)->supported_samplerates) {
c->sample_rate = (*codec)->supported_samplerates[0];
for (i = 0; (*codec)->supported_samplerates[i]; i++) {
if ((*codec)->supported_samplerates[i] == 44100)
c->sample_rate = 44100;
}
}
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
c->channel_layout = AV_CH_LAYOUT_STEREO;
if ((*codec)->channel_layouts) {
c->channel_layout = (*codec)->channel_layouts[0];
for (i = 0; (*codec)->channel_layouts[i]; i++) {
if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
c->channel_layout = AV_CH_LAYOUT_STEREO;
}
}
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
ost->st->time_base = (AVRational){ 1, c->sample_rate };
break;
case AVMEDIA_TYPE_VIDEO:
c->codec_id = codec_id;
c->bit_rate = 400000;
/* Resolution must be a multiple of two. */
c->width = 352;
c->height = 288;
/* timebase: This is the fundamental unit of time (in seconds) in terms
* of which frame timestamps are represented. For fixed-fps content,
* timebase should be 1/framerate and timestamp increments should be
* identical to 1. */
ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
c->time_base = ost->st->time_base;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* just for testing, we also add B-frames */
c->max_b_frames = 2;
}
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
/* Needed to avoid using macroblocks in which some coeffs overflow.
* This does not happen with normal video, it just happens here as
* the motion of the chroma plane does not match the luma plane. */
c->mb_decision = 2;
}
break;
default:
break;
}
/* Some formats want stream headers to be separate. */
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
/**************************************************************/
/* audio output */
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
uint64_t channel_layout,
int sample_rate, int nb_samples)
{
AVFrame *frame = av_frame_alloc();
int ret;
if (!frame) {
fprintf(stderr, "Error allocating an audio frame\n");
exit(1);
}
frame->format = sample_fmt;
frame->channel_layout = channel_layout;
frame->sample_rate = sample_rate;
frame->nb_samples = nb_samples;
if (nb_samples) {
ret = av_frame_get_buffer(frame, 0);
if (ret < 0) {
fprintf(stderr, "Error allocating an audio buffer\n");
exit(1);
}
}
return frame;
}
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
AVCodecContext *c;
int nb_samples;
int ret;
AVDictionary *opt = NULL;
c = ost->enc;
/* open it */
av_dict_copy(&opt, opt_arg, 0);
ret = avcodec_open2(c, codec, &opt);
av_dict_free(&opt);
if (ret < 0) {
fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
exit(1);
}
/* init signal generator */
ost->t = 0;
ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
/* increment frequency by 110 Hz per second */
ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
nb_samples = 10000;
else
nb_samples = c->frame_size;
ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
c->sample_rate, nb_samples);
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
c->sample_rate, nb_samples);
/* copy the stream parameters to the muxer */
ret = avcodec_parameters_from_context(ost->st->codecpar, c);
if (ret < 0) {
fprintf(stderr, "Could not copy the stream parameters\n");
exit(1);
}
/* create resampler context */
ost->swr_ctx = swr_alloc();
if (!ost->swr_ctx) {
fprintf(stderr, "Could not allocate resampler context\n");
exit(1);
}
/* set options */
av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
/* initialize the resampling context */
if ((ret = swr_init(ost->swr_ctx)) < 0) {
fprintf(stderr, "Failed to initialize the resampling context\n");
exit(1);
}
}
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
* 'nb_channels' channels. */
static AVFrame *get_audio_frame(OutputStream *ost)
{
AVFrame *frame = ost->tmp_frame;
int j, i, v;
int16_t *q = (int16_t*)frame->data[0];
/* check if we want to generate more frames */
if (av_compare_ts(ost->next_pts, ost->enc->time_base,
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
return NULL;
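/* synthesize a sine sweep and duplicate each sample across all channels */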
for (j = 0; j <frame->nb_samples; j++) {
v = (int)(sin(ost->t) * 10000);
for (i = 0; i < ost->enc->channels; i++)
*q++ = v;
ost->t += ost->tincr;
ost->tincr += ost->tincr2;
}
frame->pts = ost->next_pts;
ost->next_pts += frame->nb_samples;
return frame;
}
/*
* encode one audio frame and send it to the muxer
* return 1 when encoding is finished, 0 otherwise
*/
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
AVCodecContext *c;
AVPacket pkt = { 0 }; // data and size must be 0;
AVFrame *frame;
int ret;
int got_packet;
int dst_nb_samples;
av_init_packet(&pkt);
c = ost->enc;
frame = get_audio_frame(ost);
if (frame) {
/* convert samples from native format to destination codec format, using the resampler */
/* compute destination number of samples */
dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
c->sample_rate, c->sample_rate, AV_ROUND_UP);
av_assert0(dst_nb_samples == frame->nb_samples);
/* when we pass a frame to the encoder, it may keep a reference to it
* internally;
* make sure we do not overwrite it here
*/
ret = av_frame_make_writable(ost->frame);
if (ret < 0)
exit(1);
/* convert to destination format */
ret = swr_convert(ost->swr_ctx,
ost->frame->data, dst_nb_samples,
(const uint8_t **)frame->data, frame->nb_samples);
if (ret < 0) {
fprintf(stderr, "Error while converting\n");
exit(1);
}
frame = ost->frame;
frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
ost->samples_count += dst_nb_samples;
}
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
exit(1);
}
if (got_packet) {
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
if (ret < 0) {
fprintf(stderr, "Error while writing audio frame: %s\n",
av_err2str(ret));
exit(1);
}
}
return (frame || got_packet) ? 0 : 1;
}
/**************************************************************/
/* video output */
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
AVFrame *picture;
int ret;
picture = av_frame_alloc();
if (!picture)
return NULL;
picture->format = pix_fmt;
picture->width = width;
picture->height = height;
/* allocate the buffers for the frame data */
ret = av_frame_get_buffer(picture, 32);
if (ret < 0) {
fprintf(stderr, "Could not allocate frame data.\n");
exit(1);
}
return picture;
}
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
int ret;
AVCodecContext *c = ost->enc;
AVDictionary *opt = NULL;
av_dict_copy(&opt, opt_arg, 0);
/* open the codec */
ret = avcodec_open2(c, codec, &opt);
av_dict_free(&opt);
if (ret < 0) {
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
exit(1);
}
/* allocate and init a re-usable frame */
ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
if (!ost->frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
/* If the output format is not YUV420P, then a temporary YUV420P
* picture is needed too. It is then converted to the required
* output format. */
ost->tmp_frame = NULL;
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
if (!ost->tmp_frame) {
fprintf(stderr, "Could not allocate temporary picture\n");
exit(1);
}
}
/* copy the stream parameters to the muxer */
ret = avcodec_parameters_from_context(ost->st->codecpar, c);
if (ret < 0) {
fprintf(stderr, "Could not copy the stream parameters\n");
exit(1);
}
}
/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
int width, int height)
{
int x, y, i;
i = frame_index;
/* Y */
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
/* Cb and Cr */
for (y = 0; y < height / 2; y++) {
for (x = 0; x < width / 2; x++) {
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
}
}
}
static AVFrame *get_video_frame(OutputStream *ost)
{
AVCodecContext *c = ost->enc;
/* check if we want to generate more frames */
if (av_compare_ts(ost->next_pts, c->time_base,
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
return NULL;
/* when we pass a frame to the encoder, it may keep a reference to it
* internally; make sure we do not overwrite it here */
if (av_frame_make_writable(ost->frame) < 0)
exit(1);
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
/* as we only generate a YUV420P picture, we must convert it
* to the codec pixel format if needed */
if (!ost->sws_ctx) {
ost->sws_ctx = sws_getContext(c->width, c->height,
AV_PIX_FMT_YUV420P,
c->width, c->height,
c->pix_fmt,
SCALE_FLAGS, NULL, NULL, NULL);
if (!ost->sws_ctx) {
fprintf(stderr,
"Could not initialize the conversion context\n");
exit(1);
}
}
fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
sws_scale(ost->sws_ctx, (const uint8_t * const *) ost->tmp_frame->data,
ost->tmp_frame->linesize, 0, c->height, ost->frame->data,
ost->frame->linesize);
} else {
fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
}
ost->frame->pts = ost->next_pts++;
return ost->frame;
}
/*
* encode one video frame and send it to the muxer
* return 1 when encoding is finished, 0 otherwise
*/
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
int ret;
AVCodecContext *c;
AVFrame *frame;
int got_packet = 0;
AVPacket pkt = { 0 };
c = ost->enc;
frame = get_video_frame(ost);
av_init_packet(&pkt);
/* encode the image */
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
exit(1);
}
if (got_packet) {
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
} else {
ret = 0;
}
if (ret < 0) {
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
exit(1);
}
return (frame || got_packet) ? 0 : 1;
}
static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
avcodec_free_context(&ost->enc);
av_frame_free(&ost->frame);
av_frame_free(&ost->tmp_frame);
sws_freeContext(ost->sws_ctx);
swr_free(&ost->swr_ctx);
}
/**************************************************************/
/* media file output */
int main(int argc, char **argv)
{
OutputStream video_st = { 0 }, audio_st = { 0 };
const char *filename;
AVOutputFormat *fmt;
AVFormatContext *oc;
AVCodec *audio_codec, *video_codec;
int ret;
int have_video = 0, have_audio = 0;
int encode_video = 0, encode_audio = 0;
AVDictionary *opt = NULL;
int i;
if (argc < 2) {
printf("usage: %s output_file\n"
"API example program to output a media file with libavformat.\n"
"This program generates a synthetic audio and video stream, encodes and\n"
"muxes them into a file named output_file.\n"
"The output format is automatically guessed according to the file extension.\n"
"Raw images can also be output by using '%%d' in the filename.\n"
"\n", argv[0]);
return 1;
}
filename = argv[1];
for (i = 2; i+1 < argc; i+=2) {
if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
av_dict_set(&opt, argv[i]+1, argv[i+1], 0);
}
/* allocate the output media context */
avformat_alloc_output_context2(&oc, NULL, NULL, filename);
if (!oc) {
printf("Could not deduce output format from file extension: using MPEG.\n");
avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
}
if (!oc)
return 1;
fmt = oc->oformat;
/* Add the audio and video streams using the default format codecs
* and initialize the codecs. */
if (fmt->video_codec != AV_CODEC_ID_NONE) {
add_stream(&video_st, oc, &video_codec, fmt->video_codec);
have_video = 1;
encode_video = 1;
}
if (fmt->audio_codec != AV_CODEC_ID_NONE) {
add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
have_audio = 1;
encode_audio = 1;
}
/* Now that all the parameters are set, we can open the audio and
* video codecs and allocate the necessary encode buffers. */
if (have_video)
open_video(oc, video_codec, &video_st, opt);
if (have_audio)
open_audio(oc, audio_codec, &audio_st, opt);
av_dump_format(oc, 0, filename, 1);
/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open '%s': %s\n", filename,
av_err2str(ret));
return 1;
}
}
/* Write the stream header, if any. */
ret = avformat_write_header(oc, &opt);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file: %s\n",
av_err2str(ret));
return 1;
}
while (encode_video || encode_audio) {
/* select the stream to encode */
if (encode_video &&
(!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
encode_video = !write_video_frame(oc, &video_st);
} else {
encode_audio = !write_audio_frame(oc, &audio_st);
}
}
/* Write the trailer, if any. The trailer must be written before you
* close the CodecContexts open when you wrote the header; otherwise
* av_write_trailer() may try to use memory that was freed on
* av_codec_close(). */
av_write_trailer(oc);
/* Close each codec. */
if (have_video)
close_stream(oc, &video_st);
if (have_audio)
close_stream(oc, &audio_st);
if (!(fmt->flags & AVFMT_NOFILE))
/* Close the output file. */
avio_closep(&oc->pb);
/* free the stream */
avformat_free_context(oc);
return 0;
}
| null | null | null | null | 70,122 |
24,099 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 24,099 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_BROWSER_MEDIA_SESSION_AUDIO_FOCUS_MANAGER_H_
#define CONTENT_BROWSER_MEDIA_SESSION_AUDIO_FOCUS_MANAGER_H_
#include <list>
#include <unordered_map>
#include "base/memory/singleton.h"
#include "content/common/content_export.h"
#include "content/public/browser/web_contents_observer.h"
namespace content {
class MediaSessionImpl;
class CONTENT_EXPORT AudioFocusManager {
public:
enum class AudioFocusType {
Gain,
GainTransientMayDuck,
};
// Returns Chromium's internal AudioFocusManager.
static AudioFocusManager* GetInstance();
void RequestAudioFocus(MediaSessionImpl* media_session, AudioFocusType type);
void AbandonAudioFocus(MediaSessionImpl* media_session);
private:
friend struct base::DefaultSingletonTraits<AudioFocusManager>;
friend class AudioFocusManagerTest;
AudioFocusManager();
~AudioFocusManager();
void MaybeRemoveFocusEntry(MediaSessionImpl* media_session);
// Weak reference of managed MediaSessions. A MediaSession must abandon audio
// focus before its destruction.
std::list<MediaSessionImpl*> audio_focus_stack_;
};
} // namespace content
#endif // CONTENT_BROWSER_MEDIA_SESSION_AUDIO_FOCUS_MANAGER_H_
| null | null | null | null | 20,962 |
19,328 | null |
train_val
|
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
| 184,323 |
linux
| 0 |
https://github.com/torvalds/linux
|
2017-05-12 08:32:58+10:00
|
/*
* Linux ARCnet driver - COM20020 chipset support - function declarations
*
* Written 1997 by David Woodhouse.
* Written 1994-1999 by Avery Pennarun.
* Derived from skeleton.c by Donald Becker.
*
* Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
* for sponsoring the further development of this driver.
*
* **********************
*
* The original copyright of skeleton.c was as follows:
*
* skeleton.c Written 1993 by Donald Becker.
* Copyright 1993 United States Government as represented by the
* Director, National Security Agency. This software may only be used
* and distributed according to the terms of the GNU General Public License as
* modified by SRC, incorporated herein by reference.
*
* **********************
*
* For more details, see drivers/net/arcnet.c
*
* **********************
*/
#ifndef __COM20020_H
#define __COM20020_H
#include <linux/leds.h>
int com20020_check(struct net_device *dev);
int com20020_found(struct net_device *dev, int shared);
extern const struct net_device_ops com20020_netdev_ops;
/* The number of low I/O ports used by the card. */
#define ARCNET_TOTAL_SIZE 8
#define PLX_PCI_MAX_CARDS 2
struct ledoffsets {
int green;
int red;
};
struct com20020_pci_channel_map {
u32 bar;
u32 offset;
u32 size; /* 0x00 - auto, e.g. length of entire bar */
};
struct com20020_pci_card_info {
const char *name;
int devcount;
struct com20020_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CARDS];
struct com20020_pci_channel_map misc_map;
struct ledoffsets leds[PLX_PCI_MAX_CARDS];
int rotary;
unsigned int flags;
};
struct com20020_priv {
struct com20020_pci_card_info *ci;
struct list_head list_dev;
resource_size_t misc;
};
struct com20020_dev {
struct list_head list;
struct net_device *dev;
struct led_classdev tx_led;
struct led_classdev recon_led;
struct com20020_priv *pci_priv;
int index;
};
#define COM20020_REG_W_INTMASK 0 /* writable */
#define COM20020_REG_R_STATUS 0 /* readable */
#define COM20020_REG_W_COMMAND 1 /* standard arcnet commands */
#define COM20020_REG_R_DIAGSTAT 1 /* diagnostic status */
#define COM20020_REG_W_ADDR_HI 2 /* control for IO-mapped memory */
#define COM20020_REG_W_ADDR_LO 3
#define COM20020_REG_RW_MEMDATA 4 /* data port for IO-mapped memory */
#define COM20020_REG_W_SUBADR 5 /* the extended port _XREG refers to */
#define COM20020_REG_W_CONFIG 6 /* configuration */
#define COM20020_REG_W_XREG 7 /* extra
* (indexed by _CONFIG or _SUBADDR)
*/
/* in the ADDR_HI register */
#define RDDATAflag 0x80 /* next access is a read (not a write) */
/* in the DIAGSTAT register */
#define NEWNXTIDflag 0x02 /* ID to which token is passed has changed */
/* in the CONFIG register */
#define RESETcfg 0x80 /* put card in reset state */
#define TXENcfg 0x20 /* enable TX */
#define XTOcfg(x) ((x) << 3) /* extended timeout */
/* in SETUP register */
#define PROMISCset 0x10 /* enable RCV_ALL */
#define P1MODE 0x80 /* enable P1-MODE for Backplane */
#define SLOWARB 0x01 /* enable Slow Arbitration for >=5Mbps */
/* COM2002x */
#define SUB_TENTATIVE 0 /* tentative node ID */
#define SUB_NODE 1 /* node ID */
#define SUB_SETUP1 2 /* various options */
#define SUB_TEST 3 /* test/diag register */
/* COM20022 only */
#define SUB_SETUP2 4 /* sundry options */
#define SUB_BUSCTL 5 /* bus control options */
#define SUB_DMACOUNT 6 /* DMA count options */
static inline void com20020_set_subaddress(struct arcnet_local *lp,
int ioaddr, int val)
{
if (val < 4) {
lp->config = (lp->config & ~0x03) | val;
arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
} else {
arcnet_outb(val, ioaddr, COM20020_REG_W_SUBADR);
}
}
#endif /* __COM20020_H */
| null | null | null | null | 92,670 |
30,379 | null |
train_val
|
796a0e014bc3985709c0a35538d606ef1da31e1b
| 30,379 |
Chrome
| 0 |
https://github.com/chromium/chromium
|
2018-04-07 23:43:03+00:00
|
/*
* Copyright (C) 2009 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef THIRD_PARTY_BLINK_PUBLIC_PLATFORM_WEB_COLOR_H_
#define THIRD_PARTY_BLINK_PUBLIC_PLATFORM_WEB_COLOR_H_
namespace blink {
typedef unsigned WebColor; // RGBA quad. Equivalent to SkColor.
} // namespace blink
#endif
| null | null | null | null | 27,242 |
33,357 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 33,357 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/core/dom/dom_node_ids.h"
#include "third_party/blink/renderer/platform/heap/handle.h"
namespace blink {
DEFINE_WEAK_IDENTIFIER_MAP(Node, DOMNodeId);
// static
DOMNodeId DOMNodeIds::IdForNode(Node* node) {
return WeakIdentifierMap<Node, DOMNodeId>::Identifier(node);
}
// static
Node* DOMNodeIds::NodeForId(DOMNodeId id) {
return WeakIdentifierMap<Node, DOMNodeId>::Lookup(id);
}
} // namespace blink
| null | null | null | null | 30,220 |
18,167 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 183,162 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/*
* Copyright (c) 2012 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*#include "core_priv.h"*/
#include "mlx4_ib.h"
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <rdma/ib_mad.h>
/*show_admin_alias_guid returns the administratively assigned value of that GUID.
* Values returned in buf parameter string:
* 0 - requests opensm to assign a value.
* ffffffffffffffff - delete this entry.
* other - value assigned by administrator.
*/
static ssize_t show_admin_alias_guid(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry =
container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry);
struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
struct mlx4_ib_dev *mdev = port->dev;
__be64 sysadmin_ag_val;
sysadmin_ag_val = mlx4_get_admin_guid(mdev->dev,
mlx4_ib_iov_dentry->entry_num,
port->num);
return sprintf(buf, "%llx\n", be64_to_cpu(sysadmin_ag_val));
}
/* store_admin_alias_guid stores the (new) administratively assigned value of that GUID.
* Values in buf parameter string:
* 0 - requests opensm to assign a value.
* 0xffffffffffffffff - delete this entry.
* other - guid value assigned by the administrator.
*/
static ssize_t store_admin_alias_guid(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int record_num;/*0-15*/
int guid_index_in_rec; /*0 - 7*/
struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry =
container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry);
struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
struct mlx4_ib_dev *mdev = port->dev;
u64 sysadmin_ag_val;
unsigned long flags;
record_num = mlx4_ib_iov_dentry->entry_num / 8;
guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8;
if (0 == record_num && 0 == guid_index_in_rec) {
pr_err("GUID 0 block 0 is RO\n");
return count;
}
spin_lock_irqsave(&mdev->sriov.alias_guid.ag_work_lock, flags);
sscanf(buf, "%llx", &sysadmin_ag_val);
*(__be64 *)&mdev->sriov.alias_guid.ports_guid[port->num - 1].
all_rec_per_port[record_num].
all_recs[GUID_REC_SIZE * guid_index_in_rec] =
cpu_to_be64(sysadmin_ag_val);
/* Change the state to be pending for update */
mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].status
= MLX4_GUID_INFO_STATUS_IDLE ;
mlx4_set_admin_guid(mdev->dev, cpu_to_be64(sysadmin_ag_val),
mlx4_ib_iov_dentry->entry_num,
port->num);
/* set the record index */
mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].guid_indexes
|= mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec);
spin_unlock_irqrestore(&mdev->sriov.alias_guid.ag_work_lock, flags);
mlx4_ib_init_alias_guid_work(mdev, port->num - 1);
return count;
}
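/* Illustrative note (added, not in the original source): each alias GUID
 * record holds 8 entries, so sysfs entry_num 13 maps to record_num 1 and
 * guid_index_in_rec 5. As described above, writing 0 asks opensm to assign
 * a value, writing 0xffffffffffffffff deletes the entry, and any other
 * value is stored as the administratively assigned GUID.
 */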
static ssize_t show_port_gid(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry =
container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry);
struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
struct mlx4_ib_dev *mdev = port->dev;
union ib_gid gid;
ssize_t ret;
ret = __mlx4_ib_query_gid(&mdev->ib_dev, port->num,
mlx4_ib_iov_dentry->entry_num, &gid, 1);
if (ret)
return ret;
ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
be16_to_cpu(((__be16 *) gid.raw)[0]),
be16_to_cpu(((__be16 *) gid.raw)[1]),
be16_to_cpu(((__be16 *) gid.raw)[2]),
be16_to_cpu(((__be16 *) gid.raw)[3]),
be16_to_cpu(((__be16 *) gid.raw)[4]),
be16_to_cpu(((__be16 *) gid.raw)[5]),
be16_to_cpu(((__be16 *) gid.raw)[6]),
be16_to_cpu(((__be16 *) gid.raw)[7]));
return ret;
}
static ssize_t show_phys_port_pkey(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry =
container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry);
struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
struct mlx4_ib_dev *mdev = port->dev;
u16 pkey;
ssize_t ret;
ret = __mlx4_ib_query_pkey(&mdev->ib_dev, port->num,
mlx4_ib_iov_dentry->entry_num, &pkey, 1);
if (ret)
return ret;
return sprintf(buf, "0x%04x\n", pkey);
}
#define DENTRY_REMOVE(_dentry) \
do { \
sysfs_remove_file((_dentry)->kobj, &(_dentry)->dentry.attr); \
} while (0);
static int create_sysfs_entry(void *_ctx, struct mlx4_ib_iov_sysfs_attr *_dentry,
char *_name, struct kobject *_kobj,
ssize_t (*show)(struct device *dev,
struct device_attribute *attr,
char *buf),
ssize_t (*store)(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
)
{
int ret = 0;
struct mlx4_ib_iov_sysfs_attr *vdentry = _dentry;
vdentry->ctx = _ctx;
vdentry->dentry.show = show;
vdentry->dentry.store = store;
sysfs_attr_init(&vdentry->dentry.attr);
vdentry->dentry.attr.name = vdentry->name;
vdentry->dentry.attr.mode = 0;
vdentry->kobj = _kobj;
snprintf(vdentry->name, 15, "%s", _name);
if (vdentry->dentry.store)
vdentry->dentry.attr.mode |= S_IWUSR;
if (vdentry->dentry.show)
vdentry->dentry.attr.mode |= S_IRUGO;
ret = sysfs_create_file(vdentry->kobj, &vdentry->dentry.attr);
if (ret) {
pr_err("failed to create %s\n", vdentry->dentry.attr.name);
vdentry->ctx = NULL;
return ret;
}
return ret;
}
int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
struct attribute *attr)
{
struct mlx4_ib_iov_port *port = &device->iov_ports[port_num - 1];
int ret;
ret = sysfs_create_file(port->mcgs_parent, attr);
if (ret)
pr_err("failed to create %s\n", attr->name);
return ret;
}
void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
struct attribute *attr)
{
struct mlx4_ib_iov_port *port = &device->iov_ports[port_num - 1];
sysfs_remove_file(port->mcgs_parent, attr);
}
static int add_port_entries(struct mlx4_ib_dev *device, int port_num)
{
int i;
char buff[10];
struct mlx4_ib_iov_port *port = NULL;
int ret = 0 ;
struct ib_port_attr attr;
memset(&attr, 0, sizeof(attr));
/* get the physical gid and pkey table sizes.*/
ret = __mlx4_ib_query_port(&device->ib_dev, port_num, &attr, 1);
if (ret)
goto err;
port = &device->iov_ports[port_num - 1];
port->dev = device;
port->num = port_num;
/* Directory structure:
* iov -
* port num -
* admin_guids
* gids (operational)
* mcg_table
*/
port->dentr_ar = kzalloc(sizeof (struct mlx4_ib_iov_sysfs_attr_ar),
GFP_KERNEL);
if (!port->dentr_ar) {
ret = -ENOMEM;
goto err;
}
sprintf(buff, "%d", port_num);
port->cur_port = kobject_create_and_add(buff,
kobject_get(device->ports_parent));
if (!port->cur_port) {
ret = -ENOMEM;
goto kobj_create_err;
}
/* admin GUIDs */
port->admin_alias_parent = kobject_create_and_add("admin_guids",
kobject_get(port->cur_port));
if (!port->admin_alias_parent) {
ret = -ENOMEM;
goto err_admin_guids;
}
for (i = 0 ; i < attr.gid_tbl_len; i++) {
sprintf(buff, "%d", i);
port->dentr_ar->dentries[i].entry_num = i;
ret = create_sysfs_entry(port, &port->dentr_ar->dentries[i],
buff, port->admin_alias_parent,
show_admin_alias_guid, store_admin_alias_guid);
if (ret)
goto err_admin_alias_parent;
}
/* gids subdirectory (operational gids) */
port->gids_parent = kobject_create_and_add("gids",
kobject_get(port->cur_port));
if (!port->gids_parent) {
ret = -ENOMEM;
goto err_gids;
}
for (i = 0 ; i < attr.gid_tbl_len; i++) {
sprintf(buff, "%d", i);
port->dentr_ar->dentries[attr.gid_tbl_len + i].entry_num = i;
ret = create_sysfs_entry(port,
&port->dentr_ar->dentries[attr.gid_tbl_len + i],
buff,
port->gids_parent, show_port_gid, NULL);
if (ret)
goto err_gids_parent;
}
/* physical port pkey table */
port->pkeys_parent =
kobject_create_and_add("pkeys", kobject_get(port->cur_port));
if (!port->pkeys_parent) {
ret = -ENOMEM;
goto err_pkeys;
}
for (i = 0 ; i < attr.pkey_tbl_len; i++) {
sprintf(buff, "%d", i);
port->dentr_ar->dentries[2 * attr.gid_tbl_len + i].entry_num = i;
ret = create_sysfs_entry(port,
&port->dentr_ar->dentries[2 * attr.gid_tbl_len + i],
buff, port->pkeys_parent,
show_phys_port_pkey, NULL);
if (ret)
goto err_pkeys_parent;
}
/* MCGs table */
port->mcgs_parent =
kobject_create_and_add("mcgs", kobject_get(port->cur_port));
if (!port->mcgs_parent) {
ret = -ENOMEM;
goto err_mcgs;
}
return 0;
err_mcgs:
kobject_put(port->cur_port);
err_pkeys_parent:
kobject_put(port->pkeys_parent);
err_pkeys:
kobject_put(port->cur_port);
err_gids_parent:
kobject_put(port->gids_parent);
err_gids:
kobject_put(port->cur_port);
err_admin_alias_parent:
kobject_put(port->admin_alias_parent);
err_admin_guids:
kobject_put(port->cur_port);
kobject_put(port->cur_port); /* once more for create_and_add buff */
kobj_create_err:
kobject_put(device->ports_parent);
kfree(port->dentr_ar);
err:
pr_err("add_port_entries FAILED: for port:%d, error: %d\n",
port_num, ret);
return ret;
}
static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max)
{
char base_name[9];
/* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */
strlcpy(name, pci_name(dev->dev->persist->pdev), max);
strncpy(base_name, name, 8); /*till xxxx:yy:*/
base_name[8] = '\0';
/* Without ARI only the 3 low bits encode the function, so a function
 * number of 8 or more is folded into the device number; the trailing
 * number in the generated name is therefore taken modulo 8. */
sprintf(name, "%s%.2d.%d", base_name, (i/8), (i%8));
}
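/* Worked example (added for illustration; the pci_name is hypothetical):
 * with pci_name "0000:05:00.0" and slave i = 10, base_name is "0000:05:"
 * and the generated name is "0000:05:01.2" -- function 10 folds into
 * device 1, function 2, matching the modulo-8 rule above.
 */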
struct mlx4_port {
struct kobject kobj;
struct mlx4_ib_dev *dev;
struct attribute_group pkey_group;
struct attribute_group gid_group;
struct device_attribute enable_smi_admin;
struct device_attribute smi_enabled;
int slave;
u8 port_num;
};
static void mlx4_port_release(struct kobject *kobj)
{
struct mlx4_port *p = container_of(kobj, struct mlx4_port, kobj);
struct attribute *a;
int i;
for (i = 0; (a = p->pkey_group.attrs[i]); ++i)
kfree(a);
kfree(p->pkey_group.attrs);
for (i = 0; (a = p->gid_group.attrs[i]); ++i)
kfree(a);
kfree(p->gid_group.attrs);
kfree(p);
}
struct port_attribute {
struct attribute attr;
ssize_t (*show)(struct mlx4_port *, struct port_attribute *, char *buf);
ssize_t (*store)(struct mlx4_port *, struct port_attribute *,
const char *buf, size_t count);
};
static ssize_t port_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct port_attribute *port_attr =
container_of(attr, struct port_attribute, attr);
struct mlx4_port *p = container_of(kobj, struct mlx4_port, kobj);
if (!port_attr->show)
return -EIO;
return port_attr->show(p, port_attr, buf);
}
static ssize_t port_attr_store(struct kobject *kobj,
struct attribute *attr,
const char *buf, size_t size)
{
struct port_attribute *port_attr =
container_of(attr, struct port_attribute, attr);
struct mlx4_port *p = container_of(kobj, struct mlx4_port, kobj);
if (!port_attr->store)
return -EIO;
return port_attr->store(p, port_attr, buf, size);
}
static const struct sysfs_ops port_sysfs_ops = {
.show = port_attr_show,
.store = port_attr_store,
};
static struct kobj_type port_type = {
.release = mlx4_port_release,
.sysfs_ops = &port_sysfs_ops,
};
struct port_table_attribute {
struct port_attribute attr;
char name[8];
int index;
};
static ssize_t show_port_pkey(struct mlx4_port *p, struct port_attribute *attr,
char *buf)
{
struct port_table_attribute *tab_attr =
container_of(attr, struct port_table_attribute, attr);
ssize_t ret = -ENODEV;
if (p->dev->pkeys.virt2phys_pkey[p->slave][p->port_num - 1][tab_attr->index] >=
(p->dev->dev->caps.pkey_table_len[p->port_num]))
ret = sprintf(buf, "none\n");
else
ret = sprintf(buf, "%d\n",
p->dev->pkeys.virt2phys_pkey[p->slave]
[p->port_num - 1][tab_attr->index]);
return ret;
}
static ssize_t store_port_pkey(struct mlx4_port *p, struct port_attribute *attr,
const char *buf, size_t count)
{
struct port_table_attribute *tab_attr =
container_of(attr, struct port_table_attribute, attr);
int idx;
int err;
/* do not allow remapping Dom0 virtual pkey table */
if (p->slave == mlx4_master_func_num(p->dev->dev))
return -EINVAL;
if (!strncasecmp(buf, "no", 2))
idx = p->dev->dev->phys_caps.pkey_phys_table_len[p->port_num] - 1;
else if (sscanf(buf, "%i", &idx) != 1 ||
idx >= p->dev->dev->caps.pkey_table_len[p->port_num] ||
idx < 0)
return -EINVAL;
p->dev->pkeys.virt2phys_pkey[p->slave][p->port_num - 1]
[tab_attr->index] = idx;
mlx4_sync_pkey_table(p->dev->dev, p->slave, p->port_num,
tab_attr->index, idx);
err = mlx4_gen_pkey_eqe(p->dev->dev, p->slave, p->port_num);
if (err) {
pr_err("mlx4_gen_pkey_eqe failed for slave %d,"
" port %d, index %d\n", p->slave, p->port_num, idx);
return err;
}
return count;
}
static ssize_t show_port_gid_idx(struct mlx4_port *p,
struct port_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", p->slave);
}
static struct attribute **
alloc_group_attrs(ssize_t (*show)(struct mlx4_port *,
struct port_attribute *, char *buf),
ssize_t (*store)(struct mlx4_port *, struct port_attribute *,
const char *buf, size_t count),
int len)
{
struct attribute **tab_attr;
struct port_table_attribute *element;
int i;
tab_attr = kcalloc(1 + len, sizeof (struct attribute *), GFP_KERNEL);
if (!tab_attr)
return NULL;
for (i = 0; i < len; i++) {
element = kzalloc(sizeof (struct port_table_attribute),
GFP_KERNEL);
if (!element)
goto err;
if (snprintf(element->name, sizeof (element->name),
"%d", i) >= sizeof (element->name)) {
kfree(element);
goto err;
}
sysfs_attr_init(&element->attr.attr);
element->attr.attr.name = element->name;
if (store) {
element->attr.attr.mode = S_IWUSR | S_IRUGO;
element->attr.store = store;
} else
element->attr.attr.mode = S_IRUGO;
element->attr.show = show;
element->index = i;
tab_attr[i] = &element->attr.attr;
}
return tab_attr;
err:
while (--i >= 0)
kfree(tab_attr[i]);
kfree(tab_attr);
return NULL;
}
static ssize_t sysfs_show_smi_enabled(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mlx4_port *p =
container_of(attr, struct mlx4_port, smi_enabled);
ssize_t len = 0;
if (mlx4_vf_smi_enabled(p->dev->dev, p->slave, p->port_num))
len = sprintf(buf, "%d\n", 1);
else
len = sprintf(buf, "%d\n", 0);
return len;
}
static ssize_t sysfs_show_enable_smi_admin(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct mlx4_port *p =
container_of(attr, struct mlx4_port, enable_smi_admin);
ssize_t len = 0;
if (mlx4_vf_get_enable_smi_admin(p->dev->dev, p->slave, p->port_num))
len = sprintf(buf, "%d\n", 1);
else
len = sprintf(buf, "%d\n", 0);
return len;
}
static ssize_t sysfs_store_enable_smi_admin(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct mlx4_port *p =
container_of(attr, struct mlx4_port, enable_smi_admin);
int enable;
if (sscanf(buf, "%i", &enable) != 1 ||
enable < 0 || enable > 1)
return -EINVAL;
if (mlx4_vf_set_enable_smi_admin(p->dev->dev, p->slave, p->port_num, enable))
return -EINVAL;
return count;
}
static int add_vf_smi_entries(struct mlx4_port *p)
{
int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) ==
IB_LINK_LAYER_ETHERNET;
int ret;
/* do not display entries if eth transport, or if master */
if (is_eth || p->slave == mlx4_master_func_num(p->dev->dev))
return 0;
sysfs_attr_init(&p->smi_enabled.attr);
p->smi_enabled.show = sysfs_show_smi_enabled;
p->smi_enabled.store = NULL;
p->smi_enabled.attr.name = "smi_enabled";
p->smi_enabled.attr.mode = 0444;
ret = sysfs_create_file(&p->kobj, &p->smi_enabled.attr);
if (ret) {
pr_err("failed to create smi_enabled\n");
return ret;
}
sysfs_attr_init(&p->enable_smi_admin.attr);
p->enable_smi_admin.show = sysfs_show_enable_smi_admin;
p->enable_smi_admin.store = sysfs_store_enable_smi_admin;
p->enable_smi_admin.attr.name = "enable_smi_admin";
p->enable_smi_admin.attr.mode = 0644;
ret = sysfs_create_file(&p->kobj, &p->enable_smi_admin.attr);
if (ret) {
pr_err("failed to create enable_smi_admin\n");
sysfs_remove_file(&p->kobj, &p->smi_enabled.attr);
return ret;
}
return 0;
}
static void remove_vf_smi_entries(struct mlx4_port *p)
{
int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) ==
IB_LINK_LAYER_ETHERNET;
if (is_eth || p->slave == mlx4_master_func_num(p->dev->dev))
return;
sysfs_remove_file(&p->kobj, &p->smi_enabled.attr);
sysfs_remove_file(&p->kobj, &p->enable_smi_admin.attr);
}
static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
{
struct mlx4_port *p;
int i;
int ret;
int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port_num) ==
IB_LINK_LAYER_ETHERNET;
p = kzalloc(sizeof *p, GFP_KERNEL);
if (!p)
return -ENOMEM;
p->dev = dev;
p->port_num = port_num;
p->slave = slave;
ret = kobject_init_and_add(&p->kobj, &port_type,
kobject_get(dev->dev_ports_parent[slave]),
"%d", port_num);
if (ret)
goto err_alloc;
p->pkey_group.name = "pkey_idx";
p->pkey_group.attrs =
alloc_group_attrs(show_port_pkey,
is_eth ? NULL : store_port_pkey,
dev->dev->caps.pkey_table_len[port_num]);
if (!p->pkey_group.attrs) {
ret = -ENOMEM;
goto err_alloc;
}
ret = sysfs_create_group(&p->kobj, &p->pkey_group);
if (ret)
goto err_free_pkey;
p->gid_group.name = "gid_idx";
p->gid_group.attrs = alloc_group_attrs(show_port_gid_idx, NULL, 1);
if (!p->gid_group.attrs) {
ret = -ENOMEM;
goto err_free_pkey;
}
ret = sysfs_create_group(&p->kobj, &p->gid_group);
if (ret)
goto err_free_gid;
ret = add_vf_smi_entries(p);
if (ret)
goto err_free_gid;
list_add_tail(&p->kobj.entry, &dev->pkeys.pkey_port_list[slave]);
return 0;
err_free_gid:
kfree(p->gid_group.attrs[0]);
kfree(p->gid_group.attrs);
err_free_pkey:
for (i = 0; i < dev->dev->caps.pkey_table_len[port_num]; ++i)
kfree(p->pkey_group.attrs[i]);
kfree(p->pkey_group.attrs);
err_alloc:
kobject_put(dev->dev_ports_parent[slave]);
kfree(p);
return ret;
}
static int register_one_pkey_tree(struct mlx4_ib_dev *dev, int slave)
{
char name[32];
int err;
int port;
struct kobject *p, *t;
struct mlx4_port *mport;
struct mlx4_active_ports actv_ports;
get_name(dev, name, slave, sizeof name);
dev->pkeys.device_parent[slave] =
kobject_create_and_add(name, kobject_get(dev->iov_parent));
if (!dev->pkeys.device_parent[slave]) {
err = -ENOMEM;
goto fail_dev;
}
INIT_LIST_HEAD(&dev->pkeys.pkey_port_list[slave]);
dev->dev_ports_parent[slave] =
kobject_create_and_add("ports",
kobject_get(dev->pkeys.device_parent[slave]));
if (!dev->dev_ports_parent[slave]) {
err = -ENOMEM;
goto err_ports;
}
actv_ports = mlx4_get_active_ports(dev->dev, slave);
for (port = 1; port <= dev->dev->caps.num_ports; ++port) {
if (!test_bit(port - 1, actv_ports.ports))
continue;
err = add_port(dev, port, slave);
if (err)
goto err_add;
}
return 0;
err_add:
list_for_each_entry_safe(p, t,
&dev->pkeys.pkey_port_list[slave],
entry) {
list_del(&p->entry);
mport = container_of(p, struct mlx4_port, kobj);
sysfs_remove_group(p, &mport->pkey_group);
sysfs_remove_group(p, &mport->gid_group);
remove_vf_smi_entries(mport);
kobject_put(p);
}
kobject_put(dev->dev_ports_parent[slave]);
err_ports:
kobject_put(dev->pkeys.device_parent[slave]);
/* extra put for the device_parent create_and_add */
kobject_put(dev->pkeys.device_parent[slave]);
fail_dev:
kobject_put(dev->iov_parent);
return err;
}
static int register_pkey_tree(struct mlx4_ib_dev *device)
{
int i;
if (!mlx4_is_master(device->dev))
return 0;
for (i = 0; i <= device->dev->persist->num_vfs; ++i)
register_one_pkey_tree(device, i);
return 0;
}
static void unregister_pkey_tree(struct mlx4_ib_dev *device)
{
int slave;
struct kobject *p, *t;
struct mlx4_port *port;
if (!mlx4_is_master(device->dev))
return;
for (slave = device->dev->persist->num_vfs; slave >= 0; --slave) {
list_for_each_entry_safe(p, t,
&device->pkeys.pkey_port_list[slave],
entry) {
list_del(&p->entry);
port = container_of(p, struct mlx4_port, kobj);
sysfs_remove_group(p, &port->pkey_group);
sysfs_remove_group(p, &port->gid_group);
remove_vf_smi_entries(port);
kobject_put(p);
kobject_put(device->dev_ports_parent[slave]);
}
kobject_put(device->dev_ports_parent[slave]);
kobject_put(device->pkeys.device_parent[slave]);
kobject_put(device->pkeys.device_parent[slave]);
kobject_put(device->iov_parent);
}
}
int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev)
{
int i;
int ret = 0;
if (!mlx4_is_master(dev->dev))
return 0;
dev->iov_parent =
kobject_create_and_add("iov",
kobject_get(dev->ib_dev.ports_parent->parent));
if (!dev->iov_parent) {
ret = -ENOMEM;
goto err;
}
dev->ports_parent =
kobject_create_and_add("ports",
kobject_get(dev->iov_parent));
if (!dev->ports_parent) {
ret = -ENOMEM;
goto err_ports;
}
for (i = 1; i <= dev->ib_dev.phys_port_cnt; ++i) {
ret = add_port_entries(dev, i);
if (ret)
goto err_add_entries;
}
ret = register_pkey_tree(dev);
if (ret)
goto err_add_entries;
return 0;
err_add_entries:
kobject_put(dev->ports_parent);
err_ports:
kobject_put(dev->iov_parent);
err:
kobject_put(dev->ib_dev.ports_parent->parent);
pr_err("mlx4_ib_device_register_sysfs error (%d)\n", ret);
return ret;
}
static void unregister_alias_guid_tree(struct mlx4_ib_dev *device)
{
struct mlx4_ib_iov_port *p;
int i;
if (!mlx4_is_master(device->dev))
return;
for (i = 0; i < device->dev->caps.num_ports; i++) {
p = &device->iov_ports[i];
kobject_put(p->admin_alias_parent);
kobject_put(p->gids_parent);
kobject_put(p->pkeys_parent);
kobject_put(p->mcgs_parent);
kobject_put(p->cur_port);
kobject_put(p->cur_port);
kobject_put(p->cur_port);
kobject_put(p->cur_port);
kobject_put(p->cur_port);
kobject_put(p->dev->ports_parent);
kfree(p->dentr_ar);
}
}
void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device)
{
unregister_alias_guid_tree(device);
unregister_pkey_tree(device);
kobject_put(device->ports_parent);
kobject_put(device->iov_parent);
kobject_put(device->iov_parent);
kobject_put(device->ib_dev.ports_parent->parent);
}
| null | null | null | null | 91,509 |
16,587 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 16,587 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/invalidation/impl/ticl_profile_settings_provider.h"
#include <memory>
#include "base/macros.h"
#include "base/message_loop/message_loop.h"
#include "base/threading/thread_task_runner_handle.h"
#include "components/gcm_driver/fake_gcm_driver.h"
#include "components/gcm_driver/gcm_channel_status_syncer.h"
#include "components/invalidation/impl/fake_invalidation_state_tracker.h"
#include "components/invalidation/impl/invalidation_prefs.h"
#include "components/invalidation/impl/invalidation_state_tracker.h"
#include "components/invalidation/impl/profile_invalidation_provider.h"
#include "components/invalidation/impl/ticl_invalidation_service.h"
#include "components/invalidation/impl/ticl_settings_provider.h"
#include "components/prefs/pref_service.h"
#include "components/sync_preferences/testing_pref_service_syncable.h"
#include "google_apis/gaia/fake_identity_provider.h"
#include "google_apis/gaia/fake_oauth2_token_service.h"
#include "google_apis/gaia/identity_provider.h"
#include "net/url_request/url_request_context_getter.h"
#include "net/url_request/url_request_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace invalidation {
class TiclProfileSettingsProviderTest : public testing::Test {
protected:
TiclProfileSettingsProviderTest();
~TiclProfileSettingsProviderTest() override;
// testing::Test:
void SetUp() override;
void TearDown() override;
TiclInvalidationService::InvalidationNetworkChannel GetNetworkChannel();
base::MessageLoop message_loop_;
scoped_refptr<net::TestURLRequestContextGetter> request_context_getter_;
gcm::FakeGCMDriver gcm_driver_;
sync_preferences::TestingPrefServiceSyncable pref_service_;
FakeOAuth2TokenService token_service_;
std::unique_ptr<TiclInvalidationService> invalidation_service_;
private:
DISALLOW_COPY_AND_ASSIGN(TiclProfileSettingsProviderTest);
};
TiclProfileSettingsProviderTest::TiclProfileSettingsProviderTest() {}
TiclProfileSettingsProviderTest::~TiclProfileSettingsProviderTest() {}
void TiclProfileSettingsProviderTest::SetUp() {
gcm::GCMChannelStatusSyncer::RegisterProfilePrefs(pref_service_.registry());
ProfileInvalidationProvider::RegisterProfilePrefs(pref_service_.registry());
request_context_getter_ =
new net::TestURLRequestContextGetter(base::ThreadTaskRunnerHandle::Get());
invalidation_service_.reset(new TiclInvalidationService(
"TestUserAgent", std::unique_ptr<IdentityProvider>(
new FakeIdentityProvider(&token_service_)),
std::unique_ptr<TiclSettingsProvider>(
new TiclProfileSettingsProvider(&pref_service_)),
&gcm_driver_, request_context_getter_));
invalidation_service_->Init(std::unique_ptr<syncer::InvalidationStateTracker>(
new syncer::FakeInvalidationStateTracker));
}
void TiclProfileSettingsProviderTest::TearDown() {
invalidation_service_.reset();
}
TiclInvalidationService::InvalidationNetworkChannel
TiclProfileSettingsProviderTest::GetNetworkChannel() {
return invalidation_service_->network_channel_type_;
}
TEST_F(TiclProfileSettingsProviderTest, ChannelSelectionTest) {
// Default value should be GCM channel.
EXPECT_EQ(TiclInvalidationService::GCM_NETWORK_CHANNEL, GetNetworkChannel());
// If GCM is enabled and invalidation channel setting is not set or set to
// true then use GCM channel.
pref_service_.SetBoolean(gcm::prefs::kGCMChannelStatus, true);
pref_service_.SetBoolean(prefs::kInvalidationServiceUseGCMChannel, true);
EXPECT_EQ(TiclInvalidationService::GCM_NETWORK_CHANNEL, GetNetworkChannel());
pref_service_.SetBoolean(gcm::prefs::kGCMChannelStatus, true);
pref_service_.ClearPref(prefs::kInvalidationServiceUseGCMChannel);
EXPECT_EQ(TiclInvalidationService::GCM_NETWORK_CHANNEL, GetNetworkChannel());
pref_service_.ClearPref(gcm::prefs::kGCMChannelStatus);
pref_service_.SetBoolean(prefs::kInvalidationServiceUseGCMChannel, true);
EXPECT_EQ(TiclInvalidationService::GCM_NETWORK_CHANNEL, GetNetworkChannel());
// If invalidation channel setting says use GCM but GCM is not enabled, do not
// fall back to push channel.
pref_service_.SetBoolean(gcm::prefs::kGCMChannelStatus, false);
pref_service_.SetBoolean(prefs::kInvalidationServiceUseGCMChannel, true);
EXPECT_EQ(TiclInvalidationService::GCM_NETWORK_CHANNEL, GetNetworkChannel());
// If invalidation channel setting is set to false, fall back to push channel.
pref_service_.SetBoolean(gcm::prefs::kGCMChannelStatus, true);
pref_service_.SetBoolean(prefs::kInvalidationServiceUseGCMChannel, false);
EXPECT_EQ(TiclInvalidationService::PUSH_CLIENT_CHANNEL, GetNetworkChannel());
}
} // namespace invalidation
| null | null | null | null | 13,450 |
24,730 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 189,725 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "pad.h"
#include "aux.h"
#include "bus.h"
void
g94_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
{
struct nvkm_subdev *subdev = &pad->i2c->subdev;
struct nvkm_device *device = subdev->device;
const u32 base = (pad->id - NVKM_I2C_PAD_HYBRID(0)) * 0x50;
switch (mode) {
case NVKM_I2C_PAD_OFF:
nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000001);
break;
case NVKM_I2C_PAD_I2C:
nvkm_mask(device, 0x00e500 + base, 0x0000c003, 0x0000c001);
nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000000);
break;
case NVKM_I2C_PAD_AUX:
nvkm_mask(device, 0x00e500 + base, 0x0000c003, 0x00000002);
nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000000);
break;
default:
WARN_ON(1);
break;
}
}
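/* Illustrative note (added): the per-pad stride is 0x50, so for the pad
 * with id NVKM_I2C_PAD_HYBRID(2) the base is 0xa0 and the writes above
 * land on 0x00e5a0 (mode select) and 0x00e5ac (pad off) respectively.
 */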
static const struct nvkm_i2c_pad_func
g94_i2c_pad_s_func = {
.bus_new_4 = nv50_i2c_bus_new,
.aux_new_6 = g94_i2c_aux_new,
.mode = g94_i2c_pad_mode,
};
int
g94_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
{
return nvkm_i2c_pad_new_(&g94_i2c_pad_s_func, i2c, id, ppad);
}
static const struct nvkm_i2c_pad_func
g94_i2c_pad_x_func = {
.bus_new_4 = nv50_i2c_bus_new,
.aux_new_6 = g94_i2c_aux_new,
};
int
g94_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
{
return nvkm_i2c_pad_new_(&g94_i2c_pad_x_func, i2c, id, ppad);
}
| null | null | null | null | 98,072 |
30,598 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 30,598 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_BLINK_PUBLIC_PLATFORM_MODULES_PUSH_MESSAGING_WEB_PUSH_CLIENT_H_
#define THIRD_PARTY_BLINK_PUBLIC_PLATFORM_MODULES_PUSH_MESSAGING_WEB_PUSH_CLIENT_H_
#include <memory>
#include "third_party/blink/public/platform/modules/push_messaging/web_push_error.h"
#include "third_party/blink/public/platform/modules/push_messaging/web_push_provider.h"
#include "third_party/blink/public/platform/web_callbacks.h"
namespace blink {
class WebServiceWorkerRegistration;
struct WebPushSubscriptionOptions;
class WebPushClient {
public:
virtual ~WebPushClient() = default;
// Ownership of the WebServiceWorkerRegistration is not transferred.
// Ownership of the callbacks is transferred to the client.
virtual void Subscribe(WebServiceWorkerRegistration*,
const WebPushSubscriptionOptions&,
bool user_gesture,
std::unique_ptr<WebPushSubscriptionCallbacks>) = 0;
};
} // namespace blink
#endif // THIRD_PARTY_BLINK_PUBLIC_PLATFORM_MODULES_PUSH_MESSAGING_WEB_PUSH_CLIENT_H_
| null | null | null | null | 27,461 |
6,086 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 6,086 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROMEOS_COMPONENTS_TETHER_CONNECTION_PRESERVER_IMPL_H_
#define CHROMEOS_COMPONENTS_TETHER_CONNECTION_PRESERVER_IMPL_H_
#include <memory>
#include "base/timer/timer.h"
#include "chromeos/components/tether/active_host.h"
#include "chromeos/components/tether/connection_preserver.h"
namespace chromeos {
class NetworkStateHandler;
namespace tether {
class BleConnectionManager;
class TetherHostResponseRecorder;
// Concrete implementation of ConnectionPreserver.
class ConnectionPreserverImpl : public ConnectionPreserver,
public ActiveHost::Observer {
public:
// The maximum duration of time that a BLE Connection should be preserved.
// A preserved BLE Connection will be torn down if not used within this time.
// If the connection is used for a host connection before this time runs out,
// the Connection will be torn down.
static constexpr const uint32_t kTimeoutSeconds = 60;
ConnectionPreserverImpl(
BleConnectionManager* ble_connection_manager,
NetworkStateHandler* network_state_handler,
ActiveHost* active_host,
TetherHostResponseRecorder* tether_host_response_recorder);
~ConnectionPreserverImpl() override;
// ConnectionPreserver:
void HandleSuccessfulTetherAvailabilityResponse(
const std::string& device_id) override;
protected:
// ActiveHost::Observer:
void OnActiveHostChanged(
const ActiveHost::ActiveHostChangeInfo& change_info) override;
private:
friend class ConnectionPreserverImplTest;
bool IsConnectedToInternet();
// Between |preserved_connection_device_id_| and |device_id|, return which is
// the "preferred" preserved Connection, i.e., which is higher priority.
std::string GetPreferredPreservedConnectionDeviceId(
const std::string& device_id);
void SetPreservedConnection(const std::string& device_id);
void RemovePreservedConnectionIfPresent();
void SetTimerForTesting(std::unique_ptr<base::Timer> timer_for_test);
BleConnectionManager* ble_connection_manager_;
NetworkStateHandler* network_state_handler_;
ActiveHost* active_host_;
TetherHostResponseRecorder* tether_host_response_recorder_;
std::unique_ptr<base::Timer> preserved_connection_timer_;
std::string preserved_connection_device_id_;
base::WeakPtrFactory<ConnectionPreserverImpl> weak_ptr_factory_;
DISALLOW_COPY_AND_ASSIGN(ConnectionPreserverImpl);
};
} // namespace tether
} // namespace chromeos
#endif // CHROMEOS_COMPONENTS_TETHER_CONNECTION_PRESERVER_IMPL_H_
| null | null | null | null | 2,949 |
36,670 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 36,670 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
/*
* Copyright (C) 2010 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "third_party/blink/renderer/platform/image-decoders/webp/webp_image_decoder.h"
#include "build/build_config.h"
#include "third_party/skia/include/core/SkData.h"
#if defined(ARCH_CPU_BIG_ENDIAN)
#error Blink assumes a little-endian target.
#endif
#if SK_B32_SHIFT // Output little-endian RGBA pixels (Android).
inline WEBP_CSP_MODE outputMode(bool hasAlpha) {
return hasAlpha ? MODE_rgbA : MODE_RGBA;
}
#else // Output little-endian BGRA pixels.
inline WEBP_CSP_MODE outputMode(bool hasAlpha) {
return hasAlpha ? MODE_bgrA : MODE_BGRA;
}
#endif
namespace {
// Returns two point ranges (<left, width> pairs) at row |canvasY| which belong
// to |src| but not |dst|. A range is empty if its width is 0.
inline void findBlendRangeAtRow(const blink::IntRect& src,
const blink::IntRect& dst,
int canvasY,
int& left1,
int& width1,
int& left2,
int& width2) {
SECURITY_DCHECK(canvasY >= src.Y() && canvasY < src.MaxY());
left1 = -1;
width1 = 0;
left2 = -1;
width2 = 0;
if (canvasY < dst.Y() || canvasY >= dst.MaxY() || src.X() >= dst.MaxX() ||
src.MaxX() <= dst.X()) {
left1 = src.X();
width1 = src.Width();
return;
}
if (src.X() < dst.X()) {
left1 = src.X();
width1 = dst.X() - src.X();
}
if (src.MaxX() > dst.MaxX()) {
left2 = dst.MaxX();
width2 = src.MaxX() - dst.MaxX();
}
}
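// Worked example (added for illustration): with src covering x = [0, 10)
// and dst covering x = [4, 8) on a row inside both rects, the function
// reports <left1 = 0, width1 = 4> and <left2 = 8, width2 = 2> -- the two
// slices of src that dst does not cover.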
// alphaBlendPremultiplied and alphaBlendNonPremultiplied are separate methods,
// even though they only differ by one line. This is done so that the compiler
// can inline BlendSrcOverDstPremultiplied() and BlendSrcOverDstRaw() calls.
// For GIF images, this optimization reduces decoding time by 15% for 3MB
// images.
void alphaBlendPremultiplied(blink::ImageFrame& src,
blink::ImageFrame& dst,
int canvasY,
int left,
int width) {
for (int x = 0; x < width; ++x) {
int canvasX = left + x;
blink::ImageFrame::PixelData* pixel = src.GetAddr(canvasX, canvasY);
if (SkGetPackedA32(*pixel) != 0xff) {
blink::ImageFrame::PixelData prevPixel = *dst.GetAddr(canvasX, canvasY);
blink::ImageFrame::BlendSrcOverDstPremultiplied(pixel, prevPixel);
}
}
}
void alphaBlendNonPremultiplied(blink::ImageFrame& src,
blink::ImageFrame& dst,
int canvasY,
int left,
int width) {
for (int x = 0; x < width; ++x) {
int canvasX = left + x;
blink::ImageFrame::PixelData* pixel = src.GetAddr(canvasX, canvasY);
if (SkGetPackedA32(*pixel) != 0xff) {
blink::ImageFrame::PixelData prevPixel = *dst.GetAddr(canvasX, canvasY);
blink::ImageFrame::BlendSrcOverDstRaw(pixel, prevPixel);
}
}
}
} // namespace
namespace blink {
WEBPImageDecoder::WEBPImageDecoder(AlphaOption alpha_option,
const ColorBehavior& color_behavior,
size_t max_decoded_bytes)
: ImageDecoder(alpha_option, color_behavior, max_decoded_bytes),
decoder_(nullptr),
format_flags_(0),
frame_background_has_alpha_(false),
demux_(nullptr),
demux_state_(WEBP_DEMUX_PARSING_HEADER),
have_already_parsed_this_data_(false),
repetition_count_(kAnimationLoopOnce),
decoded_height_(0) {
blend_function_ = (alpha_option == kAlphaPremultiplied)
? alphaBlendPremultiplied
: alphaBlendNonPremultiplied;
}
WEBPImageDecoder::~WEBPImageDecoder() {
Clear();
}
void WEBPImageDecoder::Clear() {
WebPDemuxDelete(demux_);
demux_ = nullptr;
consolidated_data_.reset();
ClearDecoder();
}
void WEBPImageDecoder::ClearDecoder() {
WebPIDelete(decoder_);
decoder_ = nullptr;
decoded_height_ = 0;
frame_background_has_alpha_ = false;
}
void WEBPImageDecoder::OnSetData(SegmentReader*) {
have_already_parsed_this_data_ = false;
}
int WEBPImageDecoder::RepetitionCount() const {
return Failed() ? kAnimationLoopOnce : repetition_count_;
}
bool WEBPImageDecoder::FrameIsReceivedAtIndex(size_t index) const {
if (!demux_ || demux_state_ <= WEBP_DEMUX_PARSING_HEADER)
return false;
if (!(format_flags_ & ANIMATION_FLAG))
return ImageDecoder::FrameIsReceivedAtIndex(index);
bool frame_is_received_at_index = index < frame_buffer_cache_.size();
return frame_is_received_at_index;
}
TimeDelta WEBPImageDecoder::FrameDurationAtIndex(size_t index) const {
return index < frame_buffer_cache_.size()
? frame_buffer_cache_[index].Duration()
: TimeDelta();
}
bool WEBPImageDecoder::UpdateDemuxer() {
if (Failed())
return false;
const unsigned kWebpHeaderSize = 30;
if (data_->size() < kWebpHeaderSize)
return IsAllDataReceived() ? SetFailed() : false;
if (have_already_parsed_this_data_)
return true;
have_already_parsed_this_data_ = true;
if (consolidated_data_ && consolidated_data_->size() >= data_->size()) {
// Less data provided than last time. |consolidated_data_| is guaranteed
// to be its own copy of the data, so it is safe to keep it.
return true;
}
if (IsAllDataReceived() && !consolidated_data_) {
consolidated_data_ = data_->GetAsSkData();
} else {
buffer_.ReserveCapacity(data_->size());
while (buffer_.size() < data_->size()) {
const char* segment;
const size_t bytes = data_->GetSomeData(segment, buffer_.size());
DCHECK(bytes);
buffer_.Append(segment, bytes);
}
DCHECK_EQ(buffer_.size(), data_->size());
consolidated_data_ =
SkData::MakeWithoutCopy(buffer_.data(), buffer_.size());
}
WebPDemuxDelete(demux_);
WebPData input_data = {
reinterpret_cast<const uint8_t*>(consolidated_data_->data()),
consolidated_data_->size()};
demux_ = WebPDemuxPartial(&input_data, &demux_state_);
if (!demux_ || (IsAllDataReceived() && demux_state_ != WEBP_DEMUX_DONE)) {
if (!demux_)
consolidated_data_.reset();
return SetFailed();
}
DCHECK_GT(demux_state_, WEBP_DEMUX_PARSING_HEADER);
if (!WebPDemuxGetI(demux_, WEBP_FF_FRAME_COUNT))
return false; // Wait until the encoded image frame data arrives.
if (!IsDecodedSizeAvailable()) {
int width = WebPDemuxGetI(demux_, WEBP_FF_CANVAS_WIDTH);
int height = WebPDemuxGetI(demux_, WEBP_FF_CANVAS_HEIGHT);
if (!SetSize(width, height))
return SetFailed();
format_flags_ = WebPDemuxGetI(demux_, WEBP_FF_FORMAT_FLAGS);
if (!(format_flags_ & ANIMATION_FLAG)) {
repetition_count_ = kAnimationNone;
} else {
// Since we have parsed at least one frame, even if partially,
// the global animation (ANIM) properties have been read since
// an ANIM chunk must precede the ANMF frame chunks.
repetition_count_ = WebPDemuxGetI(demux_, WEBP_FF_LOOP_COUNT);
// Repetition count is always <= 16 bits.
DCHECK_EQ(repetition_count_, repetition_count_ & 0xffff);
// Repetition count is treated as n + 1 cycles for GIF. WebP defines loop
// count as the number of cycles, with 0 meaning infinite.
repetition_count_ = repetition_count_ == 0 ? kAnimationLoopInfinite
: repetition_count_ - 1;
// FIXME: Implement ICC profile support for animated images.
format_flags_ &= ~ICCP_FLAG;
}
if ((format_flags_ & ICCP_FLAG) && !IgnoresColorSpace())
ReadColorProfile();
}
DCHECK(IsDecodedSizeAvailable());
size_t frame_count = WebPDemuxGetI(demux_, WEBP_FF_FRAME_COUNT);
UpdateAggressivePurging(frame_count);
return true;
}
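// Hedged sketch (added; not part of the original file): the probing flow in
// UpdateDemuxer() maps onto the plain libwebp demux C API roughly as below.
// The helper name and parameters are placeholders and error handling is
// elided; it is an illustration, not Chromium code.
static bool SketchProbeWebPHeader(const uint8_t* bytes, size_t size,
                                  uint32_t* width, uint32_t* height) {
  // Wrap whatever has been received so far and ask the demuxer to parse it.
  WebPData input = {bytes, size};
  WebPDemuxState state;
  WebPDemuxer* demux = WebPDemuxPartial(&input, &state);
  if (!demux)
    return false;
  // Canvas features become available once the header has been parsed.
  bool have_header = state > WEBP_DEMUX_PARSING_HEADER;
  if (have_header) {
    *width = WebPDemuxGetI(demux, WEBP_FF_CANVAS_WIDTH);
    *height = WebPDemuxGetI(demux, WEBP_FF_CANVAS_HEIGHT);
  }
  WebPDemuxDelete(demux);
  return have_header;
}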
void WEBPImageDecoder::OnInitFrameBuffer(size_t frame_index) {
// ImageDecoder::InitFrameBuffer does a DCHECK if |frame_index| exists.
ImageFrame& buffer = frame_buffer_cache_[frame_index];
const size_t required_previous_frame_index =
buffer.RequiredPreviousFrameIndex();
if (required_previous_frame_index == kNotFound) {
frame_background_has_alpha_ =
!buffer.OriginalFrameRect().Contains(IntRect(IntPoint(), Size()));
} else {
const ImageFrame& prev_buffer =
frame_buffer_cache_[required_previous_frame_index];
frame_background_has_alpha_ =
prev_buffer.HasAlpha() || (prev_buffer.GetDisposalMethod() ==
ImageFrame::kDisposeOverwriteBgcolor);
}
// The buffer is transparent outside the decoded area while the image is
// loading. The correct alpha value for the frame will be set when it is fully
// decoded.
buffer.SetHasAlpha(true);
}
bool WEBPImageDecoder::CanReusePreviousFrameBuffer(size_t frame_index) const {
DCHECK(frame_index < frame_buffer_cache_.size());
return frame_buffer_cache_[frame_index].GetAlphaBlendSource() !=
ImageFrame::kBlendAtopPreviousFrame;
}
void WEBPImageDecoder::ClearFrameBuffer(size_t frame_index) {
if (demux_ && demux_state_ >= WEBP_DEMUX_PARSED_HEADER &&
frame_buffer_cache_[frame_index].GetStatus() ==
ImageFrame::kFramePartial) {
// Clear the decoder state so that this partial frame can be decoded again
// when requested.
ClearDecoder();
}
ImageDecoder::ClearFrameBuffer(frame_index);
}
void WEBPImageDecoder::ReadColorProfile() {
WebPChunkIterator chunk_iterator;
if (!WebPDemuxGetChunk(demux_, "ICCP", 1, &chunk_iterator)) {
WebPDemuxReleaseChunkIterator(&chunk_iterator);
return;
}
const char* profile_data =
reinterpret_cast<const char*>(chunk_iterator.chunk.bytes);
size_t profile_size = chunk_iterator.chunk.size;
sk_sp<SkColorSpace> color_space =
SkColorSpace::MakeICC(profile_data, profile_size);
if (color_space) {
if (color_space->type() == SkColorSpace::kRGB_Type)
SetEmbeddedColorSpace(std::move(color_space));
} else {
DLOG(ERROR) << "Failed to parse image ICC profile";
}
WebPDemuxReleaseChunkIterator(&chunk_iterator);
}
void WEBPImageDecoder::ApplyPostProcessing(size_t frame_index) {
ImageFrame& buffer = frame_buffer_cache_[frame_index];
int width;
int decoded_height;
if (!WebPIDecGetRGB(decoder_, &decoded_height, &width, nullptr, nullptr))
return; // See also https://bugs.webkit.org/show_bug.cgi?id=74062
if (decoded_height <= 0)
return;
const IntRect& frame_rect = buffer.OriginalFrameRect();
SECURITY_DCHECK(width == frame_rect.Width());
SECURITY_DCHECK(decoded_height <= frame_rect.Height());
const int left = frame_rect.X();
const int top = frame_rect.Y();
// TODO (msarett):
// Here we apply the color space transformation to the dst space.
// It does not really make sense to transform to a gamma-encoded
// space and then immediately after, perform a linear premultiply
// and linear blending. Can we find a way to perform the
// premultiplication and blending in a linear space?
SkColorSpaceXform* xform = ColorTransform();
if (xform) {
const SkColorSpaceXform::ColorFormat kSrcFormat =
SkColorSpaceXform::kBGRA_8888_ColorFormat;
const SkColorSpaceXform::ColorFormat kDstFormat =
SkColorSpaceXform::kRGBA_8888_ColorFormat;
for (int y = decoded_height_; y < decoded_height; ++y) {
const int canvas_y = top + y;
uint8_t* row = reinterpret_cast<uint8_t*>(buffer.GetAddr(left, canvas_y));
bool color_conversion_successful = xform->apply(
kDstFormat, row, kSrcFormat, row, width, kUnpremul_SkAlphaType);
DCHECK(color_conversion_successful);
uint8_t* pixel = row;
for (int x = 0; x < width; ++x, pixel += 4) {
const int canvas_x = left + x;
buffer.SetRGBA(canvas_x, canvas_y, pixel[0], pixel[1], pixel[2],
pixel[3]);
}
}
}
// During the decoding of the current frame, we may have set some pixels to be
// transparent (i.e. alpha < 255). If the alpha blend source was
// 'BlendAtopPreviousFrame', the values of these pixels should be determined
// by blending them against the pixels of the corresponding previous frame.
// Compute the correct opaque values now.
// FIXME: This could be avoided if libwebp decoder had an API that used the
// previous required frame to do the alpha-blending by itself.
if ((format_flags_ & ANIMATION_FLAG) && frame_index &&
buffer.GetAlphaBlendSource() == ImageFrame::kBlendAtopPreviousFrame &&
buffer.RequiredPreviousFrameIndex() != kNotFound) {
ImageFrame& prev_buffer = frame_buffer_cache_[frame_index - 1];
DCHECK_EQ(prev_buffer.GetStatus(), ImageFrame::kFrameComplete);
ImageFrame::DisposalMethod prev_disposal_method =
prev_buffer.GetDisposalMethod();
if (prev_disposal_method == ImageFrame::kDisposeKeep) {
// Blend transparent pixels with pixels in previous canvas.
for (int y = decoded_height_; y < decoded_height; ++y) {
blend_function_(buffer, prev_buffer, top + y, left, width);
}
} else if (prev_disposal_method == ImageFrame::kDisposeOverwriteBgcolor) {
const IntRect& prev_rect = prev_buffer.OriginalFrameRect();
// We need to blend a transparent pixel with the starting value (from just
// after the InitFrame() call). If the pixel belongs to prev_rect, the
// starting value was fully transparent, so this is a no-op. Otherwise, we
// need to blend against the pixel from the previous canvas.
for (int y = decoded_height_; y < decoded_height; ++y) {
int canvas_y = top + y;
int left1, width1, left2, width2;
findBlendRangeAtRow(frame_rect, prev_rect, canvas_y, left1, width1,
left2, width2);
if (width1 > 0)
blend_function_(buffer, prev_buffer, canvas_y, left1, width1);
if (width2 > 0)
blend_function_(buffer, prev_buffer, canvas_y, left2, width2);
}
}
}
decoded_height_ = decoded_height;
buffer.SetPixelsChanged(true);
}
size_t WEBPImageDecoder::DecodeFrameCount() {
// If UpdateDemuxer() fails, return the existing number of frames. This way
// if we get halfway through the image before decoding fails, we won't
// suddenly start reporting that the image has zero frames.
return UpdateDemuxer() ? WebPDemuxGetI(demux_, WEBP_FF_FRAME_COUNT)
: frame_buffer_cache_.size();
}
void WEBPImageDecoder::InitializeNewFrame(size_t index) {
if (!(format_flags_ & ANIMATION_FLAG)) {
DCHECK(!index);
return;
}
WebPIterator animated_frame;
WebPDemuxGetFrame(demux_, index + 1, &animated_frame);
DCHECK_EQ(animated_frame.complete, 1);
ImageFrame* buffer = &frame_buffer_cache_[index];
IntRect frame_rect(animated_frame.x_offset, animated_frame.y_offset,
animated_frame.width, animated_frame.height);
buffer->SetOriginalFrameRect(
Intersection(frame_rect, IntRect(IntPoint(), Size())));
buffer->SetDuration(TimeDelta::FromMilliseconds(animated_frame.duration));
buffer->SetDisposalMethod(animated_frame.dispose_method ==
WEBP_MUX_DISPOSE_BACKGROUND
? ImageFrame::kDisposeOverwriteBgcolor
: ImageFrame::kDisposeKeep);
buffer->SetAlphaBlendSource(animated_frame.blend_method == WEBP_MUX_BLEND
? ImageFrame::kBlendAtopPreviousFrame
: ImageFrame::kBlendAtopBgcolor);
buffer->SetRequiredPreviousFrameIndex(
FindRequiredPreviousFrame(index, !animated_frame.has_alpha));
WebPDemuxReleaseIterator(&animated_frame);
}
void WEBPImageDecoder::Decode(size_t index) {
if (Failed())
return;
Vector<size_t> frames_to_decode = FindFramesToDecode(index);
DCHECK(demux_);
for (auto i = frames_to_decode.rbegin(); i != frames_to_decode.rend(); ++i) {
if ((format_flags_ & ANIMATION_FLAG) && !InitFrameBuffer(*i)) {
SetFailed();
return;
}
WebPIterator webp_frame;
if (!WebPDemuxGetFrame(demux_, *i + 1, &webp_frame)) {
SetFailed();
} else {
DecodeSingleFrame(webp_frame.fragment.bytes, webp_frame.fragment.size,
*i);
WebPDemuxReleaseIterator(&webp_frame);
}
if (Failed())
return;
// If this returns false, we need more data to continue decoding.
if (!PostDecodeProcessing(*i))
break;
}
// It is also a fatal error if all data is received and we have decoded all
// frames available but the file is truncated.
if (index >= frame_buffer_cache_.size() - 1 && IsAllDataReceived() &&
demux_ && demux_state_ != WEBP_DEMUX_DONE)
SetFailed();
}
bool WEBPImageDecoder::DecodeSingleFrame(const uint8_t* data_bytes,
size_t data_size,
size_t frame_index) {
if (Failed())
return false;
DCHECK(IsDecodedSizeAvailable());
DCHECK_GT(frame_buffer_cache_.size(), frame_index);
ImageFrame& buffer = frame_buffer_cache_[frame_index];
DCHECK_NE(buffer.GetStatus(), ImageFrame::kFrameComplete);
if (buffer.GetStatus() == ImageFrame::kFrameEmpty) {
if (!buffer.AllocatePixelData(Size().Width(), Size().Height(),
ColorSpaceForSkImages()))
return SetFailed();
buffer.ZeroFillPixelData();
buffer.SetStatus(ImageFrame::kFramePartial);
// The buffer is transparent outside the decoded area while the image is
// loading. The correct alpha value for the frame will be set when it is
// fully decoded.
buffer.SetHasAlpha(true);
buffer.SetOriginalFrameRect(IntRect(IntPoint(), Size()));
}
const IntRect& frame_rect = buffer.OriginalFrameRect();
if (!decoder_) {
WEBP_CSP_MODE mode = outputMode(format_flags_ & ALPHA_FLAG);
if (!premultiply_alpha_)
mode = outputMode(false);
if (ColorTransform()) {
// Swizzling between RGBA and BGRA is zero cost in a color transform.
// So when we have a color transform, we should decode to whatever is
// easiest for libwebp, and then let the color transform swizzle if
// necessary.
// Lossy webp is encoded as YUV (so RGBA and BGRA are the same cost).
// Lossless webp is encoded as BGRA. This means decoding to BGRA is
// either faster or the same cost as RGBA.
mode = MODE_BGRA;
}
WebPInitDecBuffer(&decoder_buffer_);
decoder_buffer_.colorspace = mode;
decoder_buffer_.u.RGBA.stride =
Size().Width() * sizeof(ImageFrame::PixelData);
decoder_buffer_.u.RGBA.size =
decoder_buffer_.u.RGBA.stride * frame_rect.Height();
decoder_buffer_.is_external_memory = 1;
decoder_ = WebPINewDecoder(&decoder_buffer_);
if (!decoder_)
return SetFailed();
}
decoder_buffer_.u.RGBA.rgba = reinterpret_cast<uint8_t*>(
buffer.GetAddr(frame_rect.X(), frame_rect.Y()));
switch (WebPIUpdate(decoder_, data_bytes, data_size)) {
case VP8_STATUS_OK:
ApplyPostProcessing(frame_index);
buffer.SetHasAlpha((format_flags_ & ALPHA_FLAG) ||
frame_background_has_alpha_);
buffer.SetStatus(ImageFrame::kFrameComplete);
ClearDecoder();
return true;
case VP8_STATUS_SUSPENDED:
if (!IsAllDataReceived() && !FrameIsReceivedAtIndex(frame_index)) {
ApplyPostProcessing(frame_index);
return false;
}
FALLTHROUGH;
default:
Clear();
return SetFailed();
}
}
} // namespace blink
| null | null | null | null | 33,533 |
318 | null | train_val | b09a65ece69306a70044ac99ca6928eda58d7c79 | 270,625 | tcpdump | 0 | https://github.com/the-tcpdump-group/tcpdump | 2017-09-14 11:59:38-07:00 |
/*
* Copyright (c) 1990, 1991, 1993, 1994, 1995, 1996, 1997
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that: (1) source code distributions
* retain the above copyright notice and this paragraph in its entirety, (2)
* distributions including binary code include the above copyright notice and
* this paragraph in its entirety in the documentation or other materials
* provided with the distribution, and (3) all advertising materials mentioning
* features or use of this software display the following acknowledgement:
* ``This product includes software developed by the University of California,
* Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
* the University nor the names of its contributors may be used to endorse
* or promote products derived from this software without specific prior
* written permission.
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/* \summary: Trivial File Transfer Protocol (TFTP) printer */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <netdissect-stdinc.h>
#include <string.h>
#include "netdissect.h"
#include "extract.h"
/*
* Trivial File Transfer Protocol (IEN-133)
*/
/*
* Packet types.
*/
#define RRQ 01 /* read request */
#define WRQ 02 /* write request */
#define DATA 03 /* data packet */
#define ACK 04 /* acknowledgement */
#define TFTP_ERROR 05 /* error code */
#define OACK 06 /* option acknowledgement */
/*
* Error codes.
*/
#define EUNDEF 0 /* not defined */
#define ENOTFOUND 1 /* file not found */
#define EACCESS 2 /* access violation */
#define ENOSPACE 3 /* disk full or allocation exceeded */
#define EBADOP 4 /* illegal TFTP operation */
#define EBADID 5 /* unknown transfer ID */
#define EEXISTS 6 /* file already exists */
#define ENOUSER 7 /* no such user */
static const char tstr[] = " [|tftp]";
/* op code to string mapping */
static const struct tok op2str[] = {
{ RRQ, "RRQ" }, /* read request */
{ WRQ, "WRQ" }, /* write request */
{ DATA, "DATA" }, /* data packet */
{ ACK, "ACK" }, /* acknowledgement */
{ TFTP_ERROR, "ERROR" }, /* error code */
{ OACK, "OACK" }, /* option acknowledgement */
{ 0, NULL }
};
/* error code to string mapping */
static const struct tok err2str[] = {
{ EUNDEF, "EUNDEF" }, /* not defined */
{ ENOTFOUND, "ENOTFOUND" }, /* file not found */
{ EACCESS, "EACCESS" }, /* access violation */
{ ENOSPACE, "ENOSPACE" }, /* disk full or allocation exceeded */
{ EBADOP, "EBADOP" }, /* illegal TFTP operation */
{ EBADID, "EBADID" }, /* unknown transfer ID */
{ EEXISTS, "EEXISTS" }, /* file already exists */
{ ENOUSER, "ENOUSER" }, /* no such user */
{ 0, NULL }
};
/*
* Print trivial file transfer program requests
*/
void
tftp_print(netdissect_options *ndo,
register const u_char *bp, u_int length)
{
register const char *cp;
register int opcode;
u_int ui;
/* Print length */
ND_PRINT((ndo, " %d", length));
/* Print tftp request type */
if (length < 2)
goto trunc;
ND_TCHECK_16BITS(bp);
opcode = EXTRACT_16BITS(bp);
cp = tok2str(op2str, "tftp-#%d", opcode);
ND_PRINT((ndo, " %s", cp));
/* Bail if bogus opcode */
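        /* tok2str() falls back to the "tftp-#%d" format for unknown opcodes,
         * so a leading 't' means the opcode was not recognized (all known
         * opcode names are upper case). */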
if (*cp == 't')
return;
bp += 2;
length -= 2;
switch (opcode) {
case RRQ:
case WRQ:
if (length == 0)
goto trunc;
ND_PRINT((ndo, " "));
/* Print filename */
ND_PRINT((ndo, "\""));
ui = fn_printztn(ndo, bp, length, ndo->ndo_snapend);
ND_PRINT((ndo, "\""));
if (ui == 0)
goto trunc;
bp += ui;
length -= ui;
/* Print the mode - RRQ and WRQ only */
if (length == 0)
goto trunc; /* no mode */
ND_PRINT((ndo, " "));
ui = fn_printztn(ndo, bp, length, ndo->ndo_snapend);
if (ui == 0)
goto trunc;
bp += ui;
length -= ui;
/* Print options, if any */
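                /* RFC 2347 options follow as pairs of NUL-terminated
                 * name/value strings; print each string in turn. */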
while (length != 0) {
ND_TCHECK(*bp);
if (*bp != '\0')
ND_PRINT((ndo, " "));
ui = fn_printztn(ndo, bp, length, ndo->ndo_snapend);
if (ui == 0)
goto trunc;
bp += ui;
length -= ui;
}
break;
case OACK:
/* Print options */
while (length != 0) {
ND_TCHECK(*bp);
if (*bp != '\0')
ND_PRINT((ndo, " "));
ui = fn_printztn(ndo, bp, length, ndo->ndo_snapend);
if (ui == 0)
goto trunc;
bp += ui;
length -= ui;
}
break;
case ACK:
case DATA:
if (length < 2)
goto trunc; /* no block number */
ND_TCHECK_16BITS(bp);
ND_PRINT((ndo, " block %d", EXTRACT_16BITS(bp)));
break;
case TFTP_ERROR:
/* Print error code string */
if (length < 2)
goto trunc; /* no error code */
ND_TCHECK_16BITS(bp);
ND_PRINT((ndo, " %s", tok2str(err2str, "tftp-err-#%d \"",
EXTRACT_16BITS(bp))));
bp += 2;
length -= 2;
/* Print error message string */
if (length == 0)
goto trunc; /* no error message */
ND_PRINT((ndo, " \""));
ui = fn_printztn(ndo, bp, length, ndo->ndo_snapend);
ND_PRINT((ndo, "\""));
if (ui == 0)
goto trunc;
break;
default:
/* We shouldn't get here */
ND_PRINT((ndo, "(unknown #%d)", opcode));
break;
}
return;
trunc:
ND_PRINT((ndo, "%s", tstr));
return;
}
| null | null | null | null | 124,233 |
11,415 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 176,410 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_BACKTRACE_H
#define _ASM_TILE_BACKTRACE_H
#include <linux/types.h>
/* Reads 'size' bytes from 'address' and writes the data to 'result'.
* Returns true if successful, else false (e.g. memory not readable).
*/
typedef bool (*BacktraceMemoryReader)(void *result,
unsigned long address,
unsigned int size,
void *extra);
typedef struct {
/* Current PC. */
unsigned long pc;
/* Current stack pointer value. */
unsigned long sp;
/* Current frame pointer value (i.e. caller's stack pointer) */
unsigned long fp;
/* Internal use only: caller's PC for first frame. */
unsigned long initial_frame_caller_pc;
/* Internal use only: callback to read memory. */
BacktraceMemoryReader read_memory_func;
/* Internal use only: arbitrary argument to read_memory_func. */
void *read_memory_func_extra;
} BacktraceIterator;
typedef enum {
/* We have no idea what the caller's pc is. */
PC_LOC_UNKNOWN,
/* The caller's pc is currently in lr. */
PC_LOC_IN_LR,
/* The caller's pc can be found by dereferencing the caller's sp. */
PC_LOC_ON_STACK
} CallerPCLocation;
typedef enum {
/* We have no idea what the caller's sp is. */
SP_LOC_UNKNOWN,
/* The caller's sp is currently in r52. */
SP_LOC_IN_R52,
/* The caller's sp can be found by adding a certain constant
* to the current value of sp.
*/
SP_LOC_OFFSET
} CallerSPLocation;
/* Bit values ORed into CALLER_* values for info ops. */
enum {
/* Setting the low bit on any of these values means the info op
* applies only to one bundle ago.
*/
ONE_BUNDLE_AGO_FLAG = 1,
/* Setting this bit on a CALLER_SP_* value means the PC is in LR.
* If not set, PC is on the stack.
*/
PC_IN_LR_FLAG = 2,
/* This many of the low bits of a CALLER_SP_* value are for the
* flag bits above.
*/
NUM_INFO_OP_FLAGS = 2,
/* We cannot have one in the memory pipe so this is the maximum. */
MAX_INFO_OPS_PER_BUNDLE = 2
};
/* Internal constants used to define 'info' operands. */
enum {
/* 0 and 1 are reserved, as are all negative numbers. */
CALLER_UNKNOWN_BASE = 2,
CALLER_SP_IN_R52_BASE = 4,
CALLER_SP_OFFSET_BASE = 8,
};
/* Current backtracer state describing where it thinks the caller is. */
typedef struct {
/*
* Public fields
*/
/* How do we find the caller's PC? */
CallerPCLocation pc_location : 8;
/* How do we find the caller's SP? */
CallerSPLocation sp_location : 8;
/* If sp_location == SP_LOC_OFFSET, then caller_sp == sp +
* loc->sp_offset. Else this field is undefined.
*/
uint16_t sp_offset;
	/* Is the most recently visited bundle a terminating bundle? */
bool at_terminating_bundle;
/*
* Private fields
*/
/* Will the forward scanner see someone clobbering sp
* (i.e. changing it with something other than addi sp, sp, N?)
*/
bool sp_clobber_follows;
/* Operand to next "visible" info op (no more than one bundle past
* the next terminating bundle), or -32768 if none.
*/
int16_t next_info_operand;
	/* Is the info op for next_info_operand in the very next bundle? */
bool is_next_info_operand_adjacent;
} CallerLocation;
extern void backtrace_init(BacktraceIterator *state,
BacktraceMemoryReader read_memory_func,
void *read_memory_func_extra,
unsigned long pc, unsigned long lr,
unsigned long sp, unsigned long r52);
extern bool backtrace_next(BacktraceIterator *state);
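/* A rough usage sketch (read_mem and record are placeholder names, not part
 * of this interface): seed the iterator with the current register state and
 * then walk the frames until backtrace_next() reports no more:
 *
 *	backtrace_init(&it, read_mem, NULL, pc, lr, sp, r52);
 *	do {
 *		record(it.pc);
 *	} while (backtrace_next(&it));
 */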
#endif /* _ASM_TILE_BACKTRACE_H */
| null | null | null | null | 84,757 |
620 | null | train_val | 31e986bc171719c9e6d40d0c2cb1501796a69e6c | 259,575 | php-src | 0 | https://github.com/php/php-src | 2016-10-24 10:37:20+01:00 |
/*
+----------------------------------------------------------------------+
| PHP Version 7 |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Authors: Scott MacVicar <scottmac@php.net> |
+----------------------------------------------------------------------+
*/
#ifndef SPOOFCHECKER_SPOOFCHECKER_H
#define SPOOFCHECKER_SPOOFCHECKER_H
#include <php.h>
void spoofchecker_register_constants(INIT_FUNC_ARGS);
#endif // SPOOFCHECKER_SPOOFCHECKER_H
| null | null | null | null | 119,496 |
26,292 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 26,292 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file contains the default suppressions for AddressSanitizer.
// It should only be used under very limited circumstances such as suppressing
// a report caused by an interceptor call in a system-installed library.
#if defined(ADDRESS_SANITIZER)
// Please make sure the code below declares a single string variable
// kASanDefaultSuppressions which contains ASan suppressions delimited by
// newlines.
char kASanDefaultSuppressions[] =
// http://crbug.com/178677
"interceptor_via_lib:libsqlite3.so\n"
// PLEASE READ ABOVE BEFORE ADDING NEW SUPPRESSIONS.
// End of suppressions.
; // Please keep this semicolon.
#endif // ADDRESS_SANITIZER
| null | null | null | null | 23,155 |
13,965 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 13,965 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/offline_pages/core/prefetch/suggested_articles_observer.h"
#include "base/memory/ptr_util.h"
#include "base/run_loop.h"
#include "base/strings/utf_string_conversions.h"
#include "base/test/test_simple_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "components/offline_pages/core/client_namespace_constants.h"
#include "components/offline_pages/core/prefetch/prefetch_dispatcher.h"
#include "components/offline_pages/core/prefetch/prefetch_gcm_app_handler.h"
#include "components/offline_pages/core/prefetch/prefetch_service.h"
#include "components/offline_pages/core/prefetch/prefetch_service_test_taco.h"
#include "components/offline_pages/core/prefetch/test_prefetch_dispatcher.h"
#include "components/offline_pages/core/stub_offline_page_model.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "url/gurl.h"
using ntp_snippets::Category;
using ntp_snippets::ContentSuggestion;
namespace offline_pages {
namespace {
const base::string16 kTestTitle = base::ASCIIToUTF16("Title 1");
ContentSuggestion ContentSuggestionFromTestURL(const GURL& test_url) {
auto category =
Category::FromKnownCategory(ntp_snippets::KnownCategories::ARTICLES);
ContentSuggestion suggestion =
ContentSuggestion(category, test_url.spec(), test_url);
suggestion.set_title(kTestTitle);
return suggestion;
}
} // namespace
class OfflinePageSuggestedArticlesObserverTest : public testing::Test {
public:
OfflinePageSuggestedArticlesObserverTest()
: task_runner_(new base::TestSimpleTaskRunner),
task_runner_handle_(task_runner_) {}
void SetUp() override {
prefetch_service_test_taco_ = std::make_unique<PrefetchServiceTestTaco>();
test_prefetch_dispatcher_ = new TestPrefetchDispatcher();
prefetch_service_test_taco_->SetPrefetchDispatcher(
base::WrapUnique(test_prefetch_dispatcher_));
prefetch_service_test_taco_->SetSuggestedArticlesObserver(
std::make_unique<SuggestedArticlesObserver>());
prefetch_service_test_taco_->CreatePrefetchService();
}
void TearDown() override {
    // Ensure the store can be properly disposed of.
prefetch_service_test_taco_.reset();
task_runner_->RunUntilIdle();
}
SuggestedArticlesObserver* observer() {
return prefetch_service_test_taco_->prefetch_service()
->GetSuggestedArticlesObserver();
}
TestPrefetchDispatcher* test_prefetch_dispatcher() {
return test_prefetch_dispatcher_;
}
protected:
Category category =
Category::FromKnownCategory(ntp_snippets::KnownCategories::ARTICLES);
private:
scoped_refptr<base::TestSimpleTaskRunner> task_runner_;
base::ThreadTaskRunnerHandle task_runner_handle_;
std::unique_ptr<PrefetchServiceTestTaco> prefetch_service_test_taco_;
// Owned by the PrefetchServiceTestTaco.
TestPrefetchDispatcher* test_prefetch_dispatcher_;
};
TEST_F(OfflinePageSuggestedArticlesObserverTest,
ForwardsSuggestionsToPrefetchService) {
const GURL test_url_1("https://www.example.com/1");
observer()->GetTestingArticles()->push_back(
ContentSuggestionFromTestURL(test_url_1));
observer()->OnCategoryStatusChanged(category,
ntp_snippets::CategoryStatus::AVAILABLE);
observer()->OnNewSuggestions(category);
EXPECT_EQ(1, test_prefetch_dispatcher()->new_suggestions_count);
EXPECT_EQ(1U, test_prefetch_dispatcher()->latest_prefetch_urls.size());
EXPECT_EQ(test_url_1,
test_prefetch_dispatcher()->latest_prefetch_urls[0].url);
EXPECT_EQ(kTestTitle,
test_prefetch_dispatcher()->latest_prefetch_urls[0].title);
EXPECT_EQ(kSuggestedArticlesNamespace,
test_prefetch_dispatcher()->latest_name_space);
}
TEST_F(OfflinePageSuggestedArticlesObserverTest, RemovesAllOnBadStatus) {
const GURL test_url_1("https://www.example.com/1");
const GURL test_url_2("https://www.example.com/2");
observer()->GetTestingArticles()->push_back(
ContentSuggestionFromTestURL(test_url_1));
observer()->GetTestingArticles()->push_back(
ContentSuggestionFromTestURL(test_url_2));
observer()->OnCategoryStatusChanged(category,
ntp_snippets::CategoryStatus::AVAILABLE);
observer()->OnNewSuggestions(category);
ASSERT_EQ(2U, test_prefetch_dispatcher()->latest_prefetch_urls.size());
observer()->OnCategoryStatusChanged(
category, ntp_snippets::CategoryStatus::CATEGORY_EXPLICITLY_DISABLED);
EXPECT_EQ(1, test_prefetch_dispatcher()->remove_all_suggestions_count);
observer()->OnCategoryStatusChanged(
category,
ntp_snippets::CategoryStatus::ALL_SUGGESTIONS_EXPLICITLY_DISABLED);
EXPECT_EQ(2, test_prefetch_dispatcher()->remove_all_suggestions_count);
}
TEST_F(OfflinePageSuggestedArticlesObserverTest, RemovesClientIdOnInvalidated) {
const GURL test_url_1("https://www.example.com/1");
observer()->GetTestingArticles()->push_back(
ContentSuggestionFromTestURL(test_url_1));
observer()->OnCategoryStatusChanged(category,
ntp_snippets::CategoryStatus::AVAILABLE);
observer()->OnNewSuggestions(category);
ASSERT_EQ(1U, test_prefetch_dispatcher()->latest_prefetch_urls.size());
observer()->OnSuggestionInvalidated(
ntp_snippets::ContentSuggestion::ID(category, test_url_1.spec()));
EXPECT_EQ(1, test_prefetch_dispatcher()->remove_by_client_id_count);
EXPECT_NE(nullptr, test_prefetch_dispatcher()->last_removed_client_id.get());
EXPECT_EQ(test_url_1.spec(),
test_prefetch_dispatcher()->last_removed_client_id->id);
EXPECT_EQ(kSuggestedArticlesNamespace,
test_prefetch_dispatcher()->latest_name_space);
}
} // namespace offline_pages
| null | null | null | null | 10,828 |
6,232 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 171,227 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/*
* Driver for AMD7930 sound chips found on Sparcs.
* Copyright (C) 2002, 2008 David S. Miller <davem@davemloft.net>
*
* Based entirely upon drivers/sbus/audio/amd7930.c which is:
* Copyright (C) 1996,1997 Thomas K. Dyas (tdyas@eden.rutgers.edu)
*
* --- Notes from Thomas's original driver ---
* This is the lowlevel driver for the AMD7930 audio chip found on all
* sun4c machines and some sun4m machines.
*
* The amd7930 is actually an ISDN chip which has a very simple
* integrated audio encoder/decoder. When Sun decided on what chip to
* use for audio, they had the brilliant idea of using the amd7930 and
* only connecting the audio encoder/decoder pins.
*
* Thanks to the AMD engineer who was able to get us the AMD79C30
* databook which has all the programming information and gain tables.
*
* Advanced Micro Devices' Am79C30A is an ISDN/audio chip used in the
* SparcStation 1+. The chip provides microphone and speaker interfaces
* which provide mono-channel audio at 8K samples per second via either
* 8-bit A-law or 8-bit mu-law encoding. Also, the chip features an
* ISDN BRI Line Interface Unit (LIU), I.430 S/T physical interface,
* which performs basic D channel LAPD processing and provides raw
* B channel data. The digital audio channel, the two ISDN B channels,
* and two 64 Kbps channels to the microprocessor are all interconnected
* via a multiplexer.
 * --- End of notes from Thomas's original driver ---
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/io.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/info.h>
#include <sound/control.h>
#include <sound/initval.h>
#include <asm/irq.h>
#include <asm/prom.h>
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for Sun AMD7930 soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for Sun AMD7930 soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable Sun AMD7930 soundcard.");
MODULE_AUTHOR("Thomas K. Dyas and David S. Miller");
MODULE_DESCRIPTION("Sun AMD7930");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Sun,AMD7930}}");
/* Device register layout. */
/* Register interface presented to the CPU by the amd7930. */
#define AMD7930_CR 0x00UL /* Command Register (W) */
#define AMD7930_IR AMD7930_CR /* Interrupt Register (R) */
#define AMD7930_DR 0x01UL /* Data Register (R/W) */
#define AMD7930_DSR1 0x02UL /* D-channel Status Register 1 (R) */
#define AMD7930_DER 0x03UL /* D-channel Error Register (R) */
#define AMD7930_DCTB 0x04UL /* D-channel Transmit Buffer (W) */
#define AMD7930_DCRB AMD7930_DCTB /* D-channel Receive Buffer (R) */
#define AMD7930_BBTB 0x05UL /* Bb-channel Transmit Buffer (W) */
#define AMD7930_BBRB AMD7930_BBTB /* Bb-channel Receive Buffer (R) */
#define AMD7930_BCTB 0x06UL /* Bc-channel Transmit Buffer (W) */
#define AMD7930_BCRB AMD7930_BCTB /* Bc-channel Receive Buffer (R) */
#define AMD7930_DSR2 0x07UL /* D-channel Status Register 2 (R) */
/* Indirect registers in the Main Audio Processor. */
struct amd7930_map {
__u16 x[8];
__u16 r[8];
__u16 gx;
__u16 gr;
__u16 ger;
__u16 stgr;
__u16 ftgr;
__u16 atgr;
__u8 mmr1;
__u8 mmr2;
};
/* After an amd7930 interrupt, reading the Interrupt Register (ir)
* clears the interrupt and returns a bitmask indicating which
* interrupt source(s) require service.
*/
#define AMR_IR_DTTHRSH 0x01 /* D-channel xmit threshold */
#define AMR_IR_DRTHRSH 0x02 /* D-channel recv threshold */
#define AMR_IR_DSRI 0x04 /* D-channel packet status */
#define AMR_IR_DERI 0x08 /* D-channel error */
#define AMR_IR_BBUF 0x10 /* B-channel data xfer */
#define AMR_IR_LSRI 0x20 /* LIU status */
#define AMR_IR_DSR2I 0x40 /* D-channel buffer status */
#define AMR_IR_MLTFRMI 0x80 /* multiframe or PP */
/* The amd7930 has "indirect registers" which are accessed by writing
* the register number into the Command Register and then reading or
* writing values from the Data Register as appropriate. We define the
* AMR_* macros to be the indirect register numbers and AM_* macros to
* be bits in whatever register is referred to.
*/
/* Initialization */
#define AMR_INIT 0x21
#define AM_INIT_ACTIVE 0x01
#define AM_INIT_DATAONLY 0x02
#define AM_INIT_POWERDOWN 0x03
#define AM_INIT_DISABLE_INTS 0x04
#define AMR_INIT2 0x20
#define AM_INIT2_ENABLE_POWERDOWN 0x20
#define AM_INIT2_ENABLE_MULTIFRAME 0x10
/* Line Interface Unit */
#define AMR_LIU_LSR 0xA1
#define AM_LIU_LSR_STATE 0x07
#define AM_LIU_LSR_F3 0x08
#define AM_LIU_LSR_F7 0x10
#define AM_LIU_LSR_F8 0x20
#define AM_LIU_LSR_HSW 0x40
#define AM_LIU_LSR_HSW_CHG 0x80
#define AMR_LIU_LPR 0xA2
#define AMR_LIU_LMR1 0xA3
#define AM_LIU_LMR1_B1_ENABL 0x01
#define AM_LIU_LMR1_B2_ENABL 0x02
#define AM_LIU_LMR1_F_DISABL 0x04
#define AM_LIU_LMR1_FA_DISABL 0x08
#define AM_LIU_LMR1_REQ_ACTIV 0x10
#define AM_LIU_LMR1_F8_F3 0x20
#define AM_LIU_LMR1_LIU_ENABL 0x40
#define AMR_LIU_LMR2 0xA4
#define AM_LIU_LMR2_DECHO 0x01
#define AM_LIU_LMR2_DLOOP 0x02
#define AM_LIU_LMR2_DBACKOFF 0x04
#define AM_LIU_LMR2_EN_F3_INT 0x08
#define AM_LIU_LMR2_EN_F8_INT 0x10
#define AM_LIU_LMR2_EN_HSW_INT 0x20
#define AM_LIU_LMR2_EN_F7_INT 0x40
#define AMR_LIU_2_4 0xA5
#define AMR_LIU_MF 0xA6
#define AMR_LIU_MFSB 0xA7
#define AMR_LIU_MFQB 0xA8
/* Multiplexor */
#define AMR_MUX_MCR1 0x41
#define AMR_MUX_MCR2 0x42
#define AMR_MUX_MCR3 0x43
#define AM_MUX_CHANNEL_B1 0x01
#define AM_MUX_CHANNEL_B2 0x02
#define AM_MUX_CHANNEL_Ba 0x03
#define AM_MUX_CHANNEL_Bb 0x04
#define AM_MUX_CHANNEL_Bc 0x05
#define AM_MUX_CHANNEL_Bd 0x06
#define AM_MUX_CHANNEL_Be 0x07
#define AM_MUX_CHANNEL_Bf 0x08
#define AMR_MUX_MCR4 0x44
#define AM_MUX_MCR4_ENABLE_INTS 0x08
#define AM_MUX_MCR4_REVERSE_Bb 0x10
#define AM_MUX_MCR4_REVERSE_Bc 0x20
#define AMR_MUX_1_4 0x45
/* Main Audio Processor */
#define AMR_MAP_X 0x61
#define AMR_MAP_R 0x62
#define AMR_MAP_GX 0x63
#define AMR_MAP_GR 0x64
#define AMR_MAP_GER 0x65
#define AMR_MAP_STGR 0x66
#define AMR_MAP_FTGR_1_2 0x67
#define AMR_MAP_ATGR_1_2 0x68
#define AMR_MAP_MMR1 0x69
#define AM_MAP_MMR1_ALAW 0x01
#define AM_MAP_MMR1_GX 0x02
#define AM_MAP_MMR1_GR 0x04
#define AM_MAP_MMR1_GER 0x08
#define AM_MAP_MMR1_X 0x10
#define AM_MAP_MMR1_R 0x20
#define AM_MAP_MMR1_STG 0x40
#define AM_MAP_MMR1_LOOPBACK 0x80
#define AMR_MAP_MMR2 0x6A
#define AM_MAP_MMR2_AINB 0x01
#define AM_MAP_MMR2_LS 0x02
#define AM_MAP_MMR2_ENABLE_DTMF 0x04
#define AM_MAP_MMR2_ENABLE_TONEGEN 0x08
#define AM_MAP_MMR2_ENABLE_TONERING 0x10
#define AM_MAP_MMR2_DISABLE_HIGHPASS 0x20
#define AM_MAP_MMR2_DISABLE_AUTOZERO 0x40
#define AMR_MAP_1_10 0x6B
#define AMR_MAP_MMR3 0x6C
#define AMR_MAP_STRA 0x6D
#define AMR_MAP_STRF 0x6E
#define AMR_MAP_PEAKX 0x70
#define AMR_MAP_PEAKR 0x71
#define AMR_MAP_15_16 0x72
/* Data Link Controller */
#define AMR_DLC_FRAR_1_2_3 0x81
#define AMR_DLC_SRAR_1_2_3 0x82
#define AMR_DLC_TAR 0x83
#define AMR_DLC_DRLR 0x84
#define AMR_DLC_DTCR 0x85
#define AMR_DLC_DMR1 0x86
#define AMR_DLC_DMR1_DTTHRSH_INT 0x01
#define AMR_DLC_DMR1_DRTHRSH_INT 0x02
#define AMR_DLC_DMR1_TAR_ENABL 0x04
#define AMR_DLC_DMR1_EORP_INT 0x08
#define AMR_DLC_DMR1_EN_ADDR1 0x10
#define AMR_DLC_DMR1_EN_ADDR2 0x20
#define AMR_DLC_DMR1_EN_ADDR3 0x40
#define AMR_DLC_DMR1_EN_ADDR4 0x80
#define AMR_DLC_DMR1_EN_ADDRS 0xf0
#define AMR_DLC_DMR2 0x87
#define AMR_DLC_DMR2_RABRT_INT 0x01
#define AMR_DLC_DMR2_RESID_INT 0x02
#define AMR_DLC_DMR2_COLL_INT 0x04
#define AMR_DLC_DMR2_FCS_INT 0x08
#define AMR_DLC_DMR2_OVFL_INT 0x10
#define AMR_DLC_DMR2_UNFL_INT 0x20
#define AMR_DLC_DMR2_OVRN_INT 0x40
#define AMR_DLC_DMR2_UNRN_INT 0x80
#define AMR_DLC_1_7 0x88
#define AMR_DLC_DRCR 0x89
#define AMR_DLC_RNGR1 0x8A
#define AMR_DLC_RNGR2 0x8B
#define AMR_DLC_FRAR4 0x8C
#define AMR_DLC_SRAR4 0x8D
#define AMR_DLC_DMR3 0x8E
#define AMR_DLC_DMR3_VA_INT 0x01
#define AMR_DLC_DMR3_EOTP_INT 0x02
#define AMR_DLC_DMR3_LBRP_INT 0x04
#define AMR_DLC_DMR3_RBA_INT 0x08
#define AMR_DLC_DMR3_LBT_INT 0x10
#define AMR_DLC_DMR3_TBE_INT 0x20
#define AMR_DLC_DMR3_RPLOST_INT 0x40
#define AMR_DLC_DMR3_KEEP_FCS 0x80
#define AMR_DLC_DMR4 0x8F
#define AMR_DLC_DMR4_RCV_1 0x00
#define AMR_DLC_DMR4_RCV_2 0x01
#define AMR_DLC_DMR4_RCV_4 0x02
#define AMR_DLC_DMR4_RCV_8 0x03
#define AMR_DLC_DMR4_RCV_16 0x01
#define AMR_DLC_DMR4_RCV_24 0x02
#define AMR_DLC_DMR4_RCV_30 0x03
#define AMR_DLC_DMR4_XMT_1 0x00
#define AMR_DLC_DMR4_XMT_2 0x04
#define AMR_DLC_DMR4_XMT_4 0x08
#define AMR_DLC_DMR4_XMT_8 0x0c
#define AMR_DLC_DMR4_XMT_10 0x08
#define AMR_DLC_DMR4_XMT_14 0x0c
#define AMR_DLC_DMR4_IDLE_MARK 0x00
#define AMR_DLC_DMR4_IDLE_FLAG 0x10
#define AMR_DLC_DMR4_ADDR_BOTH 0x00
#define AMR_DLC_DMR4_ADDR_1ST 0x20
#define AMR_DLC_DMR4_ADDR_2ND 0xa0
#define AMR_DLC_DMR4_CR_ENABLE 0x40
#define AMR_DLC_12_15 0x90
#define AMR_DLC_ASR 0x91
#define AMR_DLC_EFCR 0x92
#define AMR_DLC_EFCR_EXTEND_FIFO 0x01
#define AMR_DLC_EFCR_SEC_PKT_INT 0x02
#define AMR_DSR1_VADDR 0x01
#define AMR_DSR1_EORP 0x02
#define AMR_DSR1_PKT_IP 0x04
#define AMR_DSR1_DECHO_ON 0x08
#define AMR_DSR1_DLOOP_ON 0x10
#define AMR_DSR1_DBACK_OFF 0x20
#define AMR_DSR1_EOTP 0x40
#define AMR_DSR1_CXMT_ABRT 0x80
#define AMR_DSR2_LBRP 0x01
#define AMR_DSR2_RBA 0x02
#define AMR_DSR2_RPLOST 0x04
#define AMR_DSR2_LAST_BYTE 0x08
#define AMR_DSR2_TBE 0x10
#define AMR_DSR2_MARK_IDLE 0x20
#define AMR_DSR2_FLAG_IDLE 0x40
#define AMR_DSR2_SECOND_PKT 0x80
#define AMR_DER_RABRT 0x01
#define AMR_DER_RFRAME 0x02
#define AMR_DER_COLLISION 0x04
#define AMR_DER_FCS 0x08
#define AMR_DER_OVFL 0x10
#define AMR_DER_UNFL 0x20
#define AMR_DER_OVRN 0x40
#define AMR_DER_UNRN 0x80
/* Peripheral Port */
#define AMR_PP_PPCR1 0xC0
#define AMR_PP_PPSR 0xC1
#define AMR_PP_PPIER 0xC2
#define AMR_PP_MTDR 0xC3
#define AMR_PP_MRDR 0xC3
#define AMR_PP_CITDR0 0xC4
#define AMR_PP_CIRDR0 0xC4
#define AMR_PP_CITDR1 0xC5
#define AMR_PP_CIRDR1 0xC5
#define AMR_PP_PPCR2 0xC8
#define AMR_PP_PPCR3 0xC9
struct snd_amd7930 {
spinlock_t lock;
void __iomem *regs;
u32 flags;
#define AMD7930_FLAG_PLAYBACK 0x00000001
#define AMD7930_FLAG_CAPTURE 0x00000002
struct amd7930_map map;
struct snd_card *card;
struct snd_pcm *pcm;
struct snd_pcm_substream *playback_substream;
struct snd_pcm_substream *capture_substream;
/* Playback/Capture buffer state. */
unsigned char *p_orig, *p_cur;
int p_left;
unsigned char *c_orig, *c_cur;
int c_left;
int rgain;
int pgain;
int mgain;
struct platform_device *op;
unsigned int irq;
struct snd_amd7930 *next;
};
static struct snd_amd7930 *amd7930_list;
/* Idle the AMD7930 chip. The amd->lock is not held. */
static __inline__ void amd7930_idle(struct snd_amd7930 *amd)
{
unsigned long flags;
spin_lock_irqsave(&amd->lock, flags);
sbus_writeb(AMR_INIT, amd->regs + AMD7930_CR);
sbus_writeb(0, amd->regs + AMD7930_DR);
spin_unlock_irqrestore(&amd->lock, flags);
}
/* Enable chip interrupts. The amd->lock is not held. */
static __inline__ void amd7930_enable_ints(struct snd_amd7930 *amd)
{
unsigned long flags;
spin_lock_irqsave(&amd->lock, flags);
sbus_writeb(AMR_INIT, amd->regs + AMD7930_CR);
sbus_writeb(AM_INIT_ACTIVE, amd->regs + AMD7930_DR);
spin_unlock_irqrestore(&amd->lock, flags);
}
/* Disable chip interrupts. The amd->lock is not held. */
static __inline__ void amd7930_disable_ints(struct snd_amd7930 *amd)
{
unsigned long flags;
spin_lock_irqsave(&amd->lock, flags);
sbus_writeb(AMR_INIT, amd->regs + AMD7930_CR);
sbus_writeb(AM_INIT_ACTIVE | AM_INIT_DISABLE_INTS, amd->regs + AMD7930_DR);
spin_unlock_irqrestore(&amd->lock, flags);
}
/* Commit amd7930_map settings to the hardware.
* The amd->lock is held and local interrupts are disabled.
*/
static void __amd7930_write_map(struct snd_amd7930 *amd)
{
struct amd7930_map *map = &amd->map;
sbus_writeb(AMR_MAP_GX, amd->regs + AMD7930_CR);
sbus_writeb(((map->gx >> 0) & 0xff), amd->regs + AMD7930_DR);
sbus_writeb(((map->gx >> 8) & 0xff), amd->regs + AMD7930_DR);
sbus_writeb(AMR_MAP_GR, amd->regs + AMD7930_CR);
sbus_writeb(((map->gr >> 0) & 0xff), amd->regs + AMD7930_DR);
sbus_writeb(((map->gr >> 8) & 0xff), amd->regs + AMD7930_DR);
sbus_writeb(AMR_MAP_STGR, amd->regs + AMD7930_CR);
sbus_writeb(((map->stgr >> 0) & 0xff), amd->regs + AMD7930_DR);
sbus_writeb(((map->stgr >> 8) & 0xff), amd->regs + AMD7930_DR);
sbus_writeb(AMR_MAP_GER, amd->regs + AMD7930_CR);
sbus_writeb(((map->ger >> 0) & 0xff), amd->regs + AMD7930_DR);
sbus_writeb(((map->ger >> 8) & 0xff), amd->regs + AMD7930_DR);
sbus_writeb(AMR_MAP_MMR1, amd->regs + AMD7930_CR);
sbus_writeb(map->mmr1, amd->regs + AMD7930_DR);
sbus_writeb(AMR_MAP_MMR2, amd->regs + AMD7930_CR);
sbus_writeb(map->mmr2, amd->regs + AMD7930_DR);
}
/* gx, gr & stg gains. this table must contain 256 elements with
* the 0th being "infinity" (the magic value 9008). The remaining
* elements match sun's gain curve (but with higher resolution):
* -18 to 0dB in .16dB steps then 0 to 12dB in .08dB steps.
*/
static __const__ __u16 gx_coeff[256] = {
0x9008, 0x8b7c, 0x8b51, 0x8b45, 0x8b42, 0x8b3b, 0x8b36, 0x8b33,
0x8b32, 0x8b2a, 0x8b2b, 0x8b2c, 0x8b25, 0x8b23, 0x8b22, 0x8b22,
0x9122, 0x8b1a, 0x8aa3, 0x8aa3, 0x8b1c, 0x8aa6, 0x912d, 0x912b,
0x8aab, 0x8b12, 0x8aaa, 0x8ab2, 0x9132, 0x8ab4, 0x913c, 0x8abb,
0x9142, 0x9144, 0x9151, 0x8ad5, 0x8aeb, 0x8a79, 0x8a5a, 0x8a4a,
0x8b03, 0x91c2, 0x91bb, 0x8a3f, 0x8a33, 0x91b2, 0x9212, 0x9213,
0x8a2c, 0x921d, 0x8a23, 0x921a, 0x9222, 0x9223, 0x922d, 0x9231,
0x9234, 0x9242, 0x925b, 0x92dd, 0x92c1, 0x92b3, 0x92ab, 0x92a4,
0x92a2, 0x932b, 0x9341, 0x93d3, 0x93b2, 0x93a2, 0x943c, 0x94b2,
0x953a, 0x9653, 0x9782, 0x9e21, 0x9d23, 0x9cd2, 0x9c23, 0x9baa,
0x9bde, 0x9b33, 0x9b22, 0x9b1d, 0x9ab2, 0xa142, 0xa1e5, 0x9a3b,
0xa213, 0xa1a2, 0xa231, 0xa2eb, 0xa313, 0xa334, 0xa421, 0xa54b,
0xada4, 0xac23, 0xab3b, 0xaaab, 0xaa5c, 0xb1a3, 0xb2ca, 0xb3bd,
0xbe24, 0xbb2b, 0xba33, 0xc32b, 0xcb5a, 0xd2a2, 0xe31d, 0x0808,
0x72ba, 0x62c2, 0x5c32, 0x52db, 0x513e, 0x4cce, 0x43b2, 0x4243,
0x41b4, 0x3b12, 0x3bc3, 0x3df2, 0x34bd, 0x3334, 0x32c2, 0x3224,
0x31aa, 0x2a7b, 0x2aaa, 0x2b23, 0x2bba, 0x2c42, 0x2e23, 0x25bb,
0x242b, 0x240f, 0x231a, 0x22bb, 0x2241, 0x2223, 0x221f, 0x1a33,
0x1a4a, 0x1acd, 0x2132, 0x1b1b, 0x1b2c, 0x1b62, 0x1c12, 0x1c32,
0x1d1b, 0x1e71, 0x16b1, 0x1522, 0x1434, 0x1412, 0x1352, 0x1323,
0x1315, 0x12bc, 0x127a, 0x1235, 0x1226, 0x11a2, 0x1216, 0x0a2a,
0x11bc, 0x11d1, 0x1163, 0x0ac2, 0x0ab2, 0x0aab, 0x0b1b, 0x0b23,
0x0b33, 0x0c0f, 0x0bb3, 0x0c1b, 0x0c3e, 0x0cb1, 0x0d4c, 0x0ec1,
0x079a, 0x0614, 0x0521, 0x047c, 0x0422, 0x03b1, 0x03e3, 0x0333,
0x0322, 0x031c, 0x02aa, 0x02ba, 0x02f2, 0x0242, 0x0232, 0x0227,
0x0222, 0x021b, 0x01ad, 0x0212, 0x01b2, 0x01bb, 0x01cb, 0x01f6,
0x0152, 0x013a, 0x0133, 0x0131, 0x012c, 0x0123, 0x0122, 0x00a2,
0x011b, 0x011e, 0x0114, 0x00b1, 0x00aa, 0x00b3, 0x00bd, 0x00ba,
0x00c5, 0x00d3, 0x00f3, 0x0062, 0x0051, 0x0042, 0x003b, 0x0033,
0x0032, 0x002a, 0x002c, 0x0025, 0x0023, 0x0022, 0x001a, 0x0021,
0x001b, 0x001b, 0x001d, 0x0015, 0x0013, 0x0013, 0x0012, 0x0012,
0x000a, 0x000a, 0x0011, 0x0011, 0x000b, 0x000b, 0x000c, 0x000e,
};
static __const__ __u16 ger_coeff[] = {
0x431f, /* 5. dB */
0x331f, /* 5.5 dB */
0x40dd, /* 6. dB */
0x11dd, /* 6.5 dB */
0x440f, /* 7. dB */
0x411f, /* 7.5 dB */
0x311f, /* 8. dB */
0x5520, /* 8.5 dB */
0x10dd, /* 9. dB */
0x4211, /* 9.5 dB */
0x410f, /* 10. dB */
0x111f, /* 10.5 dB */
0x600b, /* 11. dB */
0x00dd, /* 11.5 dB */
0x4210, /* 12. dB */
0x110f, /* 13. dB */
0x7200, /* 14. dB */
0x2110, /* 15. dB */
0x2200, /* 15.9 dB */
0x000b, /* 16.9 dB */
0x000f /* 18. dB */
};
/* Update amd7930_map settings and program them into the hardware.
* The amd->lock is held and local interrupts are disabled.
*/
static void __amd7930_update_map(struct snd_amd7930 *amd)
{
struct amd7930_map *map = &amd->map;
int level;
map->gx = gx_coeff[amd->rgain];
map->stgr = gx_coeff[amd->mgain];
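	/* Spread pgain (0-255) across the gx_coeff curve plus the extra
	 * ger_coeff steps; levels of 256 and up select a GER boost entry
	 * with GR pinned at the top gx_coeff table value. */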
level = (amd->pgain * (256 + ARRAY_SIZE(ger_coeff))) >> 8;
if (level >= 256) {
map->ger = ger_coeff[level - 256];
map->gr = gx_coeff[255];
} else {
map->ger = ger_coeff[0];
map->gr = gx_coeff[level];
}
__amd7930_write_map(amd);
}
static irqreturn_t snd_amd7930_interrupt(int irq, void *dev_id)
{
struct snd_amd7930 *amd = dev_id;
unsigned int elapsed;
u8 ir;
spin_lock(&amd->lock);
elapsed = 0;
ir = sbus_readb(amd->regs + AMD7930_IR);
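	/* Pseudo-DMA: each B-channel buffer (BBUF) interrupt moves a single
	 * audio byte to or from the current stream buffer. */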
if (ir & AMR_IR_BBUF) {
u8 byte;
if (amd->flags & AMD7930_FLAG_PLAYBACK) {
if (amd->p_left > 0) {
byte = *(amd->p_cur++);
amd->p_left--;
sbus_writeb(byte, amd->regs + AMD7930_BBTB);
if (amd->p_left == 0)
elapsed |= AMD7930_FLAG_PLAYBACK;
} else
sbus_writeb(0, amd->regs + AMD7930_BBTB);
} else if (amd->flags & AMD7930_FLAG_CAPTURE) {
byte = sbus_readb(amd->regs + AMD7930_BBRB);
if (amd->c_left > 0) {
*(amd->c_cur++) = byte;
amd->c_left--;
if (amd->c_left == 0)
elapsed |= AMD7930_FLAG_CAPTURE;
}
}
}
spin_unlock(&amd->lock);
if (elapsed & AMD7930_FLAG_PLAYBACK)
snd_pcm_period_elapsed(amd->playback_substream);
else
snd_pcm_period_elapsed(amd->capture_substream);
return IRQ_HANDLED;
}
static int snd_amd7930_trigger(struct snd_amd7930 *amd, unsigned int flag, int cmd)
{
unsigned long flags;
int result = 0;
spin_lock_irqsave(&amd->lock, flags);
if (cmd == SNDRV_PCM_TRIGGER_START) {
if (!(amd->flags & flag)) {
amd->flags |= flag;
/* Enable B channel interrupts. */
sbus_writeb(AMR_MUX_MCR4, amd->regs + AMD7930_CR);
sbus_writeb(AM_MUX_MCR4_ENABLE_INTS, amd->regs + AMD7930_DR);
}
} else if (cmd == SNDRV_PCM_TRIGGER_STOP) {
if (amd->flags & flag) {
amd->flags &= ~flag;
/* Disable B channel interrupts. */
sbus_writeb(AMR_MUX_MCR4, amd->regs + AMD7930_CR);
sbus_writeb(0, amd->regs + AMD7930_DR);
}
} else {
result = -EINVAL;
}
spin_unlock_irqrestore(&amd->lock, flags);
return result;
}
static int snd_amd7930_playback_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct snd_amd7930 *amd = snd_pcm_substream_chip(substream);
return snd_amd7930_trigger(amd, AMD7930_FLAG_PLAYBACK, cmd);
}
static int snd_amd7930_capture_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct snd_amd7930 *amd = snd_pcm_substream_chip(substream);
return snd_amd7930_trigger(amd, AMD7930_FLAG_CAPTURE, cmd);
}
static int snd_amd7930_playback_prepare(struct snd_pcm_substream *substream)
{
struct snd_amd7930 *amd = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int size = snd_pcm_lib_buffer_bytes(substream);
unsigned long flags;
u8 new_mmr1;
spin_lock_irqsave(&amd->lock, flags);
amd->flags |= AMD7930_FLAG_PLAYBACK;
/* Setup the pseudo-dma transfer pointers. */
amd->p_orig = amd->p_cur = runtime->dma_area;
amd->p_left = size;
/* Put the chip into the correct encoding format. */
new_mmr1 = amd->map.mmr1;
if (runtime->format == SNDRV_PCM_FORMAT_A_LAW)
new_mmr1 |= AM_MAP_MMR1_ALAW;
else
new_mmr1 &= ~AM_MAP_MMR1_ALAW;
if (new_mmr1 != amd->map.mmr1) {
amd->map.mmr1 = new_mmr1;
__amd7930_update_map(amd);
}
spin_unlock_irqrestore(&amd->lock, flags);
return 0;
}
static int snd_amd7930_capture_prepare(struct snd_pcm_substream *substream)
{
struct snd_amd7930 *amd = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int size = snd_pcm_lib_buffer_bytes(substream);
unsigned long flags;
u8 new_mmr1;
spin_lock_irqsave(&amd->lock, flags);
amd->flags |= AMD7930_FLAG_CAPTURE;
/* Setup the pseudo-dma transfer pointers. */
amd->c_orig = amd->c_cur = runtime->dma_area;
amd->c_left = size;
/* Put the chip into the correct encoding format. */
new_mmr1 = amd->map.mmr1;
if (runtime->format == SNDRV_PCM_FORMAT_A_LAW)
new_mmr1 |= AM_MAP_MMR1_ALAW;
else
new_mmr1 &= ~AM_MAP_MMR1_ALAW;
if (new_mmr1 != amd->map.mmr1) {
amd->map.mmr1 = new_mmr1;
__amd7930_update_map(amd);
}
spin_unlock_irqrestore(&amd->lock, flags);
return 0;
}
static snd_pcm_uframes_t snd_amd7930_playback_pointer(struct snd_pcm_substream *substream)
{
struct snd_amd7930 *amd = snd_pcm_substream_chip(substream);
size_t ptr;
if (!(amd->flags & AMD7930_FLAG_PLAYBACK))
return 0;
ptr = amd->p_cur - amd->p_orig;
return bytes_to_frames(substream->runtime, ptr);
}
static snd_pcm_uframes_t snd_amd7930_capture_pointer(struct snd_pcm_substream *substream)
{
struct snd_amd7930 *amd = snd_pcm_substream_chip(substream);
size_t ptr;
if (!(amd->flags & AMD7930_FLAG_CAPTURE))
return 0;
ptr = amd->c_cur - amd->c_orig;
return bytes_to_frames(substream->runtime, ptr);
}
/* Playback and capture have identical properties. */
static struct snd_pcm_hardware snd_amd7930_pcm_hw =
{
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_HALF_DUPLEX),
.formats = SNDRV_PCM_FMTBIT_MU_LAW | SNDRV_PCM_FMTBIT_A_LAW,
.rates = SNDRV_PCM_RATE_8000,
.rate_min = 8000,
.rate_max = 8000,
.channels_min = 1,
.channels_max = 1,
.buffer_bytes_max = (64*1024),
.period_bytes_min = 1,
.period_bytes_max = (64*1024),
.periods_min = 1,
.periods_max = 1024,
};
static int snd_amd7930_playback_open(struct snd_pcm_substream *substream)
{
struct snd_amd7930 *amd = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
amd->playback_substream = substream;
runtime->hw = snd_amd7930_pcm_hw;
return 0;
}
static int snd_amd7930_capture_open(struct snd_pcm_substream *substream)
{
struct snd_amd7930 *amd = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
amd->capture_substream = substream;
runtime->hw = snd_amd7930_pcm_hw;
return 0;
}
static int snd_amd7930_playback_close(struct snd_pcm_substream *substream)
{
struct snd_amd7930 *amd = snd_pcm_substream_chip(substream);
amd->playback_substream = NULL;
return 0;
}
static int snd_amd7930_capture_close(struct snd_pcm_substream *substream)
{
struct snd_amd7930 *amd = snd_pcm_substream_chip(substream);
amd->capture_substream = NULL;
return 0;
}
static int snd_amd7930_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
}
static int snd_amd7930_hw_free(struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
static struct snd_pcm_ops snd_amd7930_playback_ops = {
.open = snd_amd7930_playback_open,
.close = snd_amd7930_playback_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_amd7930_hw_params,
.hw_free = snd_amd7930_hw_free,
.prepare = snd_amd7930_playback_prepare,
.trigger = snd_amd7930_playback_trigger,
.pointer = snd_amd7930_playback_pointer,
};
static struct snd_pcm_ops snd_amd7930_capture_ops = {
.open = snd_amd7930_capture_open,
.close = snd_amd7930_capture_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_amd7930_hw_params,
.hw_free = snd_amd7930_hw_free,
.prepare = snd_amd7930_capture_prepare,
.trigger = snd_amd7930_capture_trigger,
.pointer = snd_amd7930_capture_pointer,
};
static int snd_amd7930_pcm(struct snd_amd7930 *amd)
{
struct snd_pcm *pcm;
int err;
if ((err = snd_pcm_new(amd->card,
/* ID */ "sun_amd7930",
/* device */ 0,
/* playback count */ 1,
/* capture count */ 1, &pcm)) < 0)
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_amd7930_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_amd7930_capture_ops);
pcm->private_data = amd;
pcm->info_flags = 0;
strcpy(pcm->name, amd->card->shortname);
amd->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
snd_dma_continuous_data(GFP_KERNEL),
64*1024, 64*1024);
return 0;
}
#define VOLUME_MONITOR 0
#define VOLUME_CAPTURE 1
#define VOLUME_PLAYBACK 2
static int snd_amd7930_info_volume(struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 255;
return 0;
}
static int snd_amd7930_get_volume(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
{
struct snd_amd7930 *amd = snd_kcontrol_chip(kctl);
int type = kctl->private_value;
int *swval;
switch (type) {
case VOLUME_MONITOR:
swval = &amd->mgain;
break;
case VOLUME_CAPTURE:
swval = &amd->rgain;
break;
case VOLUME_PLAYBACK:
default:
swval = &amd->pgain;
break;
}
ucontrol->value.integer.value[0] = *swval;
return 0;
}
static int snd_amd7930_put_volume(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
{
struct snd_amd7930 *amd = snd_kcontrol_chip(kctl);
unsigned long flags;
int type = kctl->private_value;
int *swval, change;
switch (type) {
case VOLUME_MONITOR:
swval = &amd->mgain;
break;
case VOLUME_CAPTURE:
swval = &amd->rgain;
break;
case VOLUME_PLAYBACK:
default:
swval = &amd->pgain;
break;
}
spin_lock_irqsave(&amd->lock, flags);
if (*swval != ucontrol->value.integer.value[0]) {
*swval = ucontrol->value.integer.value[0] & 0xff;
__amd7930_update_map(amd);
change = 1;
} else
change = 0;
spin_unlock_irqrestore(&amd->lock, flags);
return change;
}
static struct snd_kcontrol_new amd7930_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Monitor Volume",
.index = 0,
.info = snd_amd7930_info_volume,
.get = snd_amd7930_get_volume,
.put = snd_amd7930_put_volume,
.private_value = VOLUME_MONITOR,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Capture Volume",
.index = 0,
.info = snd_amd7930_info_volume,
.get = snd_amd7930_get_volume,
.put = snd_amd7930_put_volume,
.private_value = VOLUME_CAPTURE,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Playback Volume",
.index = 0,
.info = snd_amd7930_info_volume,
.get = snd_amd7930_get_volume,
.put = snd_amd7930_put_volume,
.private_value = VOLUME_PLAYBACK,
},
};
static int snd_amd7930_mixer(struct snd_amd7930 *amd)
{
struct snd_card *card;
int idx, err;
if (snd_BUG_ON(!amd || !amd->card))
return -EINVAL;
card = amd->card;
strcpy(card->mixername, card->shortname);
for (idx = 0; idx < ARRAY_SIZE(amd7930_controls); idx++) {
if ((err = snd_ctl_add(card,
snd_ctl_new1(&amd7930_controls[idx], amd))) < 0)
return err;
}
return 0;
}
static int snd_amd7930_free(struct snd_amd7930 *amd)
{
struct platform_device *op = amd->op;
amd7930_idle(amd);
if (amd->irq)
free_irq(amd->irq, amd);
if (amd->regs)
of_iounmap(&op->resource[0], amd->regs,
resource_size(&op->resource[0]));
kfree(amd);
return 0;
}
static int snd_amd7930_dev_free(struct snd_device *device)
{
struct snd_amd7930 *amd = device->device_data;
return snd_amd7930_free(amd);
}
static struct snd_device_ops snd_amd7930_dev_ops = {
.dev_free = snd_amd7930_dev_free,
};
static int snd_amd7930_create(struct snd_card *card,
struct platform_device *op,
int irq, int dev,
struct snd_amd7930 **ramd)
{
struct snd_amd7930 *amd;
unsigned long flags;
int err;
*ramd = NULL;
amd = kzalloc(sizeof(*amd), GFP_KERNEL);
if (amd == NULL)
return -ENOMEM;
spin_lock_init(&amd->lock);
amd->card = card;
amd->op = op;
amd->regs = of_ioremap(&op->resource[0], 0,
resource_size(&op->resource[0]), "amd7930");
if (!amd->regs) {
snd_printk(KERN_ERR
"amd7930-%d: Unable to map chip registers.\n", dev);
kfree(amd);
return -EIO;
}
amd7930_idle(amd);
if (request_irq(irq, snd_amd7930_interrupt,
IRQF_SHARED, "amd7930", amd)) {
snd_printk(KERN_ERR "amd7930-%d: Unable to grab IRQ %d\n",
dev, irq);
snd_amd7930_free(amd);
return -EBUSY;
}
amd->irq = irq;
amd7930_enable_ints(amd);
spin_lock_irqsave(&amd->lock, flags);
amd->rgain = 128;
amd->pgain = 200;
amd->mgain = 0;
memset(&amd->map, 0, sizeof(amd->map));
amd->map.mmr1 = (AM_MAP_MMR1_GX | AM_MAP_MMR1_GER |
AM_MAP_MMR1_GR | AM_MAP_MMR1_STG);
amd->map.mmr2 = (AM_MAP_MMR2_LS | AM_MAP_MMR2_AINB);
__amd7930_update_map(amd);
/* Always MUX audio (Ba) to channel Bb. */
sbus_writeb(AMR_MUX_MCR1, amd->regs + AMD7930_CR);
sbus_writeb(AM_MUX_CHANNEL_Ba | (AM_MUX_CHANNEL_Bb << 4),
amd->regs + AMD7930_DR);
spin_unlock_irqrestore(&amd->lock, flags);
if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL,
amd, &snd_amd7930_dev_ops)) < 0) {
snd_amd7930_free(amd);
return err;
}
*ramd = amd;
return 0;
}
static int amd7930_sbus_probe(struct platform_device *op)
{
struct resource *rp = &op->resource[0];
static int dev_num;
struct snd_card *card;
struct snd_amd7930 *amd;
int err, irq;
irq = op->archdata.irqs[0];
if (dev_num >= SNDRV_CARDS)
return -ENODEV;
if (!enable[dev_num]) {
dev_num++;
return -ENOENT;
}
err = snd_card_new(&op->dev, index[dev_num], id[dev_num],
THIS_MODULE, 0, &card);
if (err < 0)
return err;
strcpy(card->driver, "AMD7930");
strcpy(card->shortname, "Sun AMD7930");
sprintf(card->longname, "%s at 0x%02lx:0x%08Lx, irq %d",
card->shortname,
rp->flags & 0xffL,
(unsigned long long)rp->start,
irq);
if ((err = snd_amd7930_create(card, op,
irq, dev_num, &amd)) < 0)
goto out_err;
if ((err = snd_amd7930_pcm(amd)) < 0)
goto out_err;
if ((err = snd_amd7930_mixer(amd)) < 0)
goto out_err;
if ((err = snd_card_register(card)) < 0)
goto out_err;
amd->next = amd7930_list;
amd7930_list = amd;
dev_num++;
return 0;
out_err:
snd_card_free(card);
return err;
}
static const struct of_device_id amd7930_match[] = {
{
.name = "audio",
},
{},
};
MODULE_DEVICE_TABLE(of, amd7930_match);
static struct platform_driver amd7930_sbus_driver = {
.driver = {
.name = "audio",
.of_match_table = amd7930_match,
},
.probe = amd7930_sbus_probe,
};
static int __init amd7930_init(void)
{
return platform_driver_register(&amd7930_sbus_driver);
}
static void __exit amd7930_exit(void)
{
struct snd_amd7930 *p = amd7930_list;
while (p != NULL) {
struct snd_amd7930 *next = p->next;
snd_card_free(p->card);
p = next;
}
amd7930_list = NULL;
platform_driver_unregister(&amd7930_sbus_driver);
}
module_init(amd7930_init);
module_exit(amd7930_exit);
| null | null | null | null | 79,574 |
41,473 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 41,473 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/gn/config_values_extractors.h"
#include "tools/gn/escape.h"
namespace {
class EscapedStringWriter {
public:
explicit EscapedStringWriter(const EscapeOptions& escape_options)
: escape_options_(escape_options) {
}
void operator()(const std::string& s, std::ostream& out) const {
out << " ";
EscapeStringToStream(out, s, escape_options_);
}
private:
const EscapeOptions& escape_options_;
};
} // namespace
void RecursiveTargetConfigStringsToStream(
const Target* target,
const std::vector<std::string>& (ConfigValues::* getter)() const,
const EscapeOptions& escape_options,
std::ostream& out) {
RecursiveTargetConfigToStream(target, getter,
EscapedStringWriter(escape_options), out);
}
| null | null | null | null | 38,336 |
66,595 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 66,595 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/policy/cloud/remote_commands_invalidator_impl.h"
#include "base/logging.h"
#include "components/policy/core/common/remote_commands/remote_commands_service.h"
namespace policy {
RemoteCommandsInvalidatorImpl::RemoteCommandsInvalidatorImpl(
CloudPolicyCore* core)
: core_(core) {
DCHECK(core_);
}
void RemoteCommandsInvalidatorImpl::OnInitialize() {
core_->AddObserver(this);
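  // The remote commands service may have been started before this
  // invalidator was initialized; if so, deliver the missed notification now.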
if (core_->remote_commands_service())
OnRemoteCommandsServiceStarted(core_);
}
void RemoteCommandsInvalidatorImpl::OnShutdown() {
core_->RemoveObserver(this);
}
void RemoteCommandsInvalidatorImpl::OnStart() {
core_->store()->AddObserver(this);
OnStoreLoaded(core_->store());
}
void RemoteCommandsInvalidatorImpl::OnStop() {
core_->store()->RemoveObserver(this);
}
void RemoteCommandsInvalidatorImpl::DoRemoteCommandsFetch() {
DCHECK(core_->remote_commands_service());
core_->remote_commands_service()->FetchRemoteCommands();
}
void RemoteCommandsInvalidatorImpl::OnCoreConnected(CloudPolicyCore* core) {
}
void RemoteCommandsInvalidatorImpl::OnRefreshSchedulerStarted(
CloudPolicyCore* core) {
}
void RemoteCommandsInvalidatorImpl::OnCoreDisconnecting(CloudPolicyCore* core) {
Stop();
}
void RemoteCommandsInvalidatorImpl::OnRemoteCommandsServiceStarted(
CloudPolicyCore* core) {
Start();
}
void RemoteCommandsInvalidatorImpl::OnStoreLoaded(CloudPolicyStore* core) {
ReloadPolicyData(core_->store()->policy());
}
void RemoteCommandsInvalidatorImpl::OnStoreError(CloudPolicyStore* core) {
}
} // namespace policy
| null | null | null | null | 63,458 |
5,823 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 170,818 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/*
* da7219-aad.c - Dialog DA7219 ALSA SoC AAD Driver
*
* Copyright (c) 2015 Dialog Semiconductor Ltd.
*
* Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/property.h>
#include <linux/pm_wakeirq.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <sound/soc.h>
#include <sound/jack.h>
#include <sound/da7219.h>
#include "da7219.h"
#include "da7219-aad.h"
/*
* Detection control
*/
void da7219_aad_jack_det(struct snd_soc_codec *codec, struct snd_soc_jack *jack)
{
struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
da7219->aad->jack = jack;
da7219->aad->jack_inserted = false;
/* Send an initial empty report */
snd_soc_jack_report(jack, 0, DA7219_AAD_REPORT_ALL_MASK);
/* Enable/Disable jack detection */
snd_soc_update_bits(codec, DA7219_ACCDET_CONFIG_1,
DA7219_ACCDET_EN_MASK,
(jack ? DA7219_ACCDET_EN_MASK : 0));
}
EXPORT_SYMBOL_GPL(da7219_aad_jack_det);
/*
* Button/HPTest work
*/
static void da7219_aad_btn_det_work(struct work_struct *work)
{
struct da7219_aad_priv *da7219_aad =
container_of(work, struct da7219_aad_priv, btn_det_work);
struct snd_soc_codec *codec = da7219_aad->codec;
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
u8 statusa, micbias_ctrl;
bool micbias_up = false;
int retries = 0;
/* Drive headphones/lineout */
snd_soc_update_bits(codec, DA7219_HP_L_CTRL,
DA7219_HP_L_AMP_OE_MASK,
DA7219_HP_L_AMP_OE_MASK);
snd_soc_update_bits(codec, DA7219_HP_R_CTRL,
DA7219_HP_R_AMP_OE_MASK,
DA7219_HP_R_AMP_OE_MASK);
/* Make sure mic bias is up */
snd_soc_dapm_force_enable_pin(dapm, "Mic Bias");
snd_soc_dapm_sync(dapm);
do {
statusa = snd_soc_read(codec, DA7219_ACCDET_STATUS_A);
if (statusa & DA7219_MICBIAS_UP_STS_MASK)
micbias_up = true;
else if (retries++ < DA7219_AAD_MICBIAS_CHK_RETRIES)
msleep(DA7219_AAD_MICBIAS_CHK_DELAY);
} while ((!micbias_up) && (retries < DA7219_AAD_MICBIAS_CHK_RETRIES));
if (retries >= DA7219_AAD_MICBIAS_CHK_RETRIES)
dev_warn(codec->dev, "Mic bias status check timed out");
/*
* Mic bias pulse required to enable mic, must be done before enabling
* button detection to prevent erroneous button readings.
*/
if (da7219_aad->micbias_pulse_lvl && da7219_aad->micbias_pulse_time) {
/* Pulse higher level voltage */
micbias_ctrl = snd_soc_read(codec, DA7219_MICBIAS_CTRL);
snd_soc_update_bits(codec, DA7219_MICBIAS_CTRL,
DA7219_MICBIAS1_LEVEL_MASK,
da7219_aad->micbias_pulse_lvl);
msleep(da7219_aad->micbias_pulse_time);
snd_soc_write(codec, DA7219_MICBIAS_CTRL, micbias_ctrl);
}
snd_soc_update_bits(codec, DA7219_ACCDET_CONFIG_1,
DA7219_BUTTON_CONFIG_MASK,
da7219_aad->btn_cfg);
}
static void da7219_aad_hptest_work(struct work_struct *work)
{
struct da7219_aad_priv *da7219_aad =
container_of(work, struct da7219_aad_priv, hptest_work);
struct snd_soc_codec *codec = da7219_aad->codec;
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
u16 tonegen_freq_hptest;
u8 pll_srm_sts, gain_ramp_ctrl, accdet_cfg8;
int report = 0, ret = 0;
/* Lock DAPM and any Kcontrols that are affected by this test */
snd_soc_dapm_mutex_lock(dapm);
mutex_lock(&da7219->lock);
/* Ensure MCLK is available for HP test procedure */
if (da7219->mclk) {
ret = clk_prepare_enable(da7219->mclk);
if (ret) {
dev_err(codec->dev, "Failed to enable mclk - %d\n", ret);
mutex_unlock(&da7219->lock);
snd_soc_dapm_mutex_unlock(dapm);
return;
}
}
/*
* If MCLK not present, then we're using the internal oscillator and
* require different frequency settings to achieve the same result.
*/
pll_srm_sts = snd_soc_read(codec, DA7219_PLL_SRM_STS);
if (pll_srm_sts & DA7219_PLL_SRM_STS_MCLK)
tonegen_freq_hptest = cpu_to_le16(DA7219_AAD_HPTEST_RAMP_FREQ);
else
tonegen_freq_hptest = cpu_to_le16(DA7219_AAD_HPTEST_RAMP_FREQ_INT_OSC);
/* Ensure gain ramping at fastest rate */
gain_ramp_ctrl = snd_soc_read(codec, DA7219_GAIN_RAMP_CTRL);
snd_soc_write(codec, DA7219_GAIN_RAMP_CTRL, DA7219_GAIN_RAMP_RATE_X8);
	/* Bypass the regmap cache so the temporary HP test writes don't clobber
	 * the cached settings, which are synced back once the test completes. */
regcache_cache_bypass(da7219->regmap, true);
/* Make sure Tone Generator is disabled */
snd_soc_write(codec, DA7219_TONE_GEN_CFG1, 0);
/* Enable HPTest block, 1KOhms check */
snd_soc_update_bits(codec, DA7219_ACCDET_CONFIG_8,
DA7219_HPTEST_EN_MASK | DA7219_HPTEST_RES_SEL_MASK,
DA7219_HPTEST_EN_MASK |
DA7219_HPTEST_RES_SEL_1KOHMS);
/* Set gains to 0db */
snd_soc_write(codec, DA7219_DAC_L_GAIN, DA7219_DAC_DIGITAL_GAIN_0DB);
snd_soc_write(codec, DA7219_DAC_R_GAIN, DA7219_DAC_DIGITAL_GAIN_0DB);
snd_soc_write(codec, DA7219_HP_L_GAIN, DA7219_HP_AMP_GAIN_0DB);
snd_soc_write(codec, DA7219_HP_R_GAIN, DA7219_HP_AMP_GAIN_0DB);
/* Disable DAC filters, EQs and soft mute */
snd_soc_update_bits(codec, DA7219_DAC_FILTERS1, DA7219_HPF_MODE_MASK,
0);
snd_soc_update_bits(codec, DA7219_DAC_FILTERS4, DA7219_DAC_EQ_EN_MASK,
0);
snd_soc_update_bits(codec, DA7219_DAC_FILTERS5,
DA7219_DAC_SOFTMUTE_EN_MASK, 0);
/* Enable HP left & right paths */
snd_soc_update_bits(codec, DA7219_CP_CTRL, DA7219_CP_EN_MASK,
DA7219_CP_EN_MASK);
snd_soc_update_bits(codec, DA7219_DIG_ROUTING_DAC,
DA7219_DAC_L_SRC_MASK | DA7219_DAC_R_SRC_MASK,
DA7219_DAC_L_SRC_TONEGEN |
DA7219_DAC_R_SRC_TONEGEN);
snd_soc_update_bits(codec, DA7219_DAC_L_CTRL,
DA7219_DAC_L_EN_MASK | DA7219_DAC_L_MUTE_EN_MASK,
DA7219_DAC_L_EN_MASK);
snd_soc_update_bits(codec, DA7219_DAC_R_CTRL,
DA7219_DAC_R_EN_MASK | DA7219_DAC_R_MUTE_EN_MASK,
DA7219_DAC_R_EN_MASK);
snd_soc_update_bits(codec, DA7219_MIXOUT_L_SELECT,
DA7219_MIXOUT_L_MIX_SELECT_MASK,
DA7219_MIXOUT_L_MIX_SELECT_MASK);
snd_soc_update_bits(codec, DA7219_MIXOUT_R_SELECT,
DA7219_MIXOUT_R_MIX_SELECT_MASK,
DA7219_MIXOUT_R_MIX_SELECT_MASK);
snd_soc_update_bits(codec, DA7219_DROUTING_ST_OUTFILT_1L,
DA7219_OUTFILT_ST_1L_SRC_MASK,
DA7219_DMIX_ST_SRC_OUTFILT1L);
snd_soc_update_bits(codec, DA7219_DROUTING_ST_OUTFILT_1R,
DA7219_OUTFILT_ST_1R_SRC_MASK,
DA7219_DMIX_ST_SRC_OUTFILT1R);
snd_soc_update_bits(codec, DA7219_MIXOUT_L_CTRL,
DA7219_MIXOUT_L_AMP_EN_MASK,
DA7219_MIXOUT_L_AMP_EN_MASK);
snd_soc_update_bits(codec, DA7219_MIXOUT_R_CTRL,
DA7219_MIXOUT_R_AMP_EN_MASK,
DA7219_MIXOUT_R_AMP_EN_MASK);
snd_soc_update_bits(codec, DA7219_HP_L_CTRL,
DA7219_HP_L_AMP_OE_MASK | DA7219_HP_L_AMP_EN_MASK,
DA7219_HP_L_AMP_OE_MASK | DA7219_HP_L_AMP_EN_MASK);
snd_soc_update_bits(codec, DA7219_HP_R_CTRL,
DA7219_HP_R_AMP_OE_MASK | DA7219_HP_R_AMP_EN_MASK,
DA7219_HP_R_AMP_OE_MASK | DA7219_HP_R_AMP_EN_MASK);
msleep(DA7219_SETTLING_DELAY);
snd_soc_update_bits(codec, DA7219_HP_L_CTRL,
DA7219_HP_L_AMP_MUTE_EN_MASK |
DA7219_HP_L_AMP_MIN_GAIN_EN_MASK, 0);
snd_soc_update_bits(codec, DA7219_HP_R_CTRL,
DA7219_HP_R_AMP_MUTE_EN_MASK |
DA7219_HP_R_AMP_MIN_GAIN_EN_MASK, 0);
/*
* If we're running from the internal oscillator then give audio paths
* time to settle before running test.
*/
if (!(pll_srm_sts & DA7219_PLL_SRM_STS_MCLK))
msleep(DA7219_AAD_HPTEST_INT_OSC_PATH_DELAY);
/* Configure & start Tone Generator */
snd_soc_write(codec, DA7219_TONE_GEN_ON_PER, DA7219_BEEP_ON_PER_MASK);
regmap_raw_write(da7219->regmap, DA7219_TONE_GEN_FREQ1_L,
&tonegen_freq_hptest, sizeof(tonegen_freq_hptest));
snd_soc_update_bits(codec, DA7219_TONE_GEN_CFG2,
DA7219_SWG_SEL_MASK | DA7219_TONE_GEN_GAIN_MASK,
DA7219_SWG_SEL_SRAMP |
DA7219_TONE_GEN_GAIN_MINUS_15DB);
snd_soc_write(codec, DA7219_TONE_GEN_CFG1, DA7219_START_STOPN_MASK);
msleep(DA7219_AAD_HPTEST_PERIOD);
/* Grab comparator reading */
accdet_cfg8 = snd_soc_read(codec, DA7219_ACCDET_CONFIG_8);
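	/* HPTEST comparator set means the attached 3-pole load looks like a
	 * headphone; otherwise report it as line-out. */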
if (accdet_cfg8 & DA7219_HPTEST_COMP_MASK)
report |= SND_JACK_HEADPHONE;
else
report |= SND_JACK_LINEOUT;
/* Stop tone generator */
snd_soc_write(codec, DA7219_TONE_GEN_CFG1, 0);
msleep(DA7219_AAD_HPTEST_PERIOD);
/* Restore original settings from cache */
regcache_mark_dirty(da7219->regmap);
regcache_sync_region(da7219->regmap, DA7219_HP_L_CTRL,
DA7219_HP_R_CTRL);
msleep(DA7219_SETTLING_DELAY);
regcache_sync_region(da7219->regmap, DA7219_MIXOUT_L_CTRL,
DA7219_MIXOUT_R_CTRL);
regcache_sync_region(da7219->regmap, DA7219_DROUTING_ST_OUTFILT_1L,
DA7219_DROUTING_ST_OUTFILT_1R);
regcache_sync_region(da7219->regmap, DA7219_MIXOUT_L_SELECT,
DA7219_MIXOUT_R_SELECT);
regcache_sync_region(da7219->regmap, DA7219_DAC_L_CTRL,
DA7219_DAC_R_CTRL);
regcache_sync_region(da7219->regmap, DA7219_DIG_ROUTING_DAC,
DA7219_DIG_ROUTING_DAC);
regcache_sync_region(da7219->regmap, DA7219_CP_CTRL, DA7219_CP_CTRL);
regcache_sync_region(da7219->regmap, DA7219_DAC_FILTERS5,
DA7219_DAC_FILTERS5);
regcache_sync_region(da7219->regmap, DA7219_DAC_FILTERS4,
DA7219_DAC_FILTERS1);
regcache_sync_region(da7219->regmap, DA7219_HP_L_GAIN,
DA7219_HP_R_GAIN);
regcache_sync_region(da7219->regmap, DA7219_DAC_L_GAIN,
DA7219_DAC_R_GAIN);
regcache_sync_region(da7219->regmap, DA7219_TONE_GEN_ON_PER,
DA7219_TONE_GEN_ON_PER);
regcache_sync_region(da7219->regmap, DA7219_TONE_GEN_FREQ1_L,
DA7219_TONE_GEN_FREQ1_U);
regcache_sync_region(da7219->regmap, DA7219_TONE_GEN_CFG1,
DA7219_TONE_GEN_CFG2);
regcache_cache_bypass(da7219->regmap, false);
/* Disable HPTest block */
snd_soc_update_bits(codec, DA7219_ACCDET_CONFIG_8,
DA7219_HPTEST_EN_MASK, 0);
/*
* If we're running from the internal oscillator then give audio paths
* time to settle before allowing headphones to be driven as required.
*/
if (!(pll_srm_sts & DA7219_PLL_SRM_STS_MCLK))
msleep(DA7219_AAD_HPTEST_INT_OSC_PATH_DELAY);
/* Restore gain ramping rate */
snd_soc_write(codec, DA7219_GAIN_RAMP_CTRL, gain_ramp_ctrl);
/* Drive Headphones/lineout */
snd_soc_update_bits(codec, DA7219_HP_L_CTRL, DA7219_HP_L_AMP_OE_MASK,
DA7219_HP_L_AMP_OE_MASK);
snd_soc_update_bits(codec, DA7219_HP_R_CTRL, DA7219_HP_R_AMP_OE_MASK,
DA7219_HP_R_AMP_OE_MASK);
/* Remove MCLK, if previously enabled */
if (da7219->mclk)
clk_disable_unprepare(da7219->mclk);
mutex_unlock(&da7219->lock);
snd_soc_dapm_mutex_unlock(dapm);
/*
	 * Only send report if the jack hasn't been removed during the process,
* otherwise it's invalid and we drop it.
*/
if (da7219_aad->jack_inserted)
snd_soc_jack_report(da7219_aad->jack, report,
SND_JACK_HEADSET | SND_JACK_LINEOUT);
}
/*
* IRQ
*/
static irqreturn_t da7219_aad_irq_thread(int irq, void *data)
{
struct da7219_aad_priv *da7219_aad = data;
struct snd_soc_codec *codec = da7219_aad->codec;
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
u8 events[DA7219_AAD_IRQ_REG_MAX];
u8 statusa;
int i, report = 0, mask = 0;
/* Read current IRQ events */
regmap_bulk_read(da7219->regmap, DA7219_ACCDET_IRQ_EVENT_A,
events, DA7219_AAD_IRQ_REG_MAX);
if (!events[DA7219_AAD_IRQ_REG_A] && !events[DA7219_AAD_IRQ_REG_B])
return IRQ_NONE;
/* Read status register for jack insertion & type status */
statusa = snd_soc_read(codec, DA7219_ACCDET_STATUS_A);
/* Clear events */
regmap_bulk_write(da7219->regmap, DA7219_ACCDET_IRQ_EVENT_A,
events, DA7219_AAD_IRQ_REG_MAX);
dev_dbg(codec->dev, "IRQ events = 0x%x|0x%x, status = 0x%x\n",
events[DA7219_AAD_IRQ_REG_A], events[DA7219_AAD_IRQ_REG_B],
statusa);
if (statusa & DA7219_JACK_INSERTION_STS_MASK) {
/* Jack Insertion */
if (events[DA7219_AAD_IRQ_REG_A] &
DA7219_E_JACK_INSERTED_MASK) {
report |= SND_JACK_MECHANICAL;
mask |= SND_JACK_MECHANICAL;
da7219_aad->jack_inserted = true;
}
/* Jack type detection */
if (events[DA7219_AAD_IRQ_REG_A] &
DA7219_E_JACK_DETECT_COMPLETE_MASK) {
/*
* If 4-pole, then enable button detection, else perform
* HP impedance test to determine output type to report.
*
* We schedule work here as the tasks themselves can
* take time to complete, and in particular for hptest
* we want to be able to check if the jack was removed
* during the procedure as this will invalidate the
* result. By doing this as work, the IRQ thread can
* handle a removal, and we can check at the end of
* hptest if we have a valid result or not.
*/
if (statusa & DA7219_JACK_TYPE_STS_MASK) {
report |= SND_JACK_HEADSET;
mask |= SND_JACK_HEADSET | SND_JACK_LINEOUT;
schedule_work(&da7219_aad->btn_det_work);
} else {
schedule_work(&da7219_aad->hptest_work);
}
}
/* Button support for 4-pole jack */
if (statusa & DA7219_JACK_TYPE_STS_MASK) {
for (i = 0; i < DA7219_AAD_MAX_BUTTONS; ++i) {
/* Button Press */
if (events[DA7219_AAD_IRQ_REG_B] &
(DA7219_E_BUTTON_A_PRESSED_MASK << i)) {
report |= SND_JACK_BTN_0 >> i;
mask |= SND_JACK_BTN_0 >> i;
}
}
snd_soc_jack_report(da7219_aad->jack, report, mask);
for (i = 0; i < DA7219_AAD_MAX_BUTTONS; ++i) {
/* Button Release */
if (events[DA7219_AAD_IRQ_REG_B] &
(DA7219_E_BUTTON_A_RELEASED_MASK >> i)) {
report &= ~(SND_JACK_BTN_0 >> i);
mask |= SND_JACK_BTN_0 >> i;
}
}
}
} else {
/* Jack removal */
if (events[DA7219_AAD_IRQ_REG_A] & DA7219_E_JACK_REMOVED_MASK) {
report = 0;
mask |= DA7219_AAD_REPORT_ALL_MASK;
da7219_aad->jack_inserted = false;
/* Un-drive headphones/lineout */
snd_soc_update_bits(codec, DA7219_HP_R_CTRL,
DA7219_HP_R_AMP_OE_MASK, 0);
snd_soc_update_bits(codec, DA7219_HP_L_CTRL,
DA7219_HP_L_AMP_OE_MASK, 0);
/* Ensure button detection disabled */
snd_soc_update_bits(codec, DA7219_ACCDET_CONFIG_1,
DA7219_BUTTON_CONFIG_MASK, 0);
/* Disable mic bias */
snd_soc_dapm_disable_pin(dapm, "Mic Bias");
snd_soc_dapm_sync(dapm);
/* Cancel any pending work */
cancel_work_sync(&da7219_aad->btn_det_work);
cancel_work_sync(&da7219_aad->hptest_work);
}
}
snd_soc_jack_report(da7219_aad->jack, report, mask);
return IRQ_HANDLED;
}
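/*
 * The scheduling comment in the IRQ thread above reflects a general pattern:
 * defer slow accessory-detection steps from the threaded IRQ handler to a
 * workqueue, then re-check jack state at the end of the deferred work so that
 * a removal mid-measurement invalidates the result. What follows is only a
 * minimal, hypothetical sketch of that pattern; my_chip, my_slow_detect_work
 * and my_irq_thread are invented names and this is not part of the da7219
 * driver.
 */
#include <linux/interrupt.h>
#include <linux/workqueue.h>

/* Hypothetical device context; INIT_WORK() on slow_detect_work is assumed
 * to have been done at probe time.
 */
struct my_chip {
	struct work_struct slow_detect_work;
	bool jack_inserted;
};

/* Deferred work: free to msleep() and poll registers at length. */
static void my_slow_detect_work(struct work_struct *work)
{
	struct my_chip *chip =
		container_of(work, struct my_chip, slow_detect_work);

	/* ... lengthy measurement, register writes, settling delays ... */

	/* Only report if the jack is still present; a removal during the
	 * measurement invalidates the result, as in hptest_work above.
	 */
	if (chip->jack_inserted)
		; /* report the detected accessory type here */
}

static irqreturn_t my_irq_thread(int irq, void *data)
{
	struct my_chip *chip = data;

	chip->jack_inserted = true;	/* decoded from IRQ status in practice */
	schedule_work(&chip->slow_detect_work);
	return IRQ_HANDLED;
}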
/*
* DT/ACPI to pdata conversion
*/
static enum da7219_aad_micbias_pulse_lvl
da7219_aad_fw_micbias_pulse_lvl(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 2800:
return DA7219_AAD_MICBIAS_PULSE_LVL_2_8V;
case 2900:
return DA7219_AAD_MICBIAS_PULSE_LVL_2_9V;
default:
dev_warn(codec->dev, "Invalid micbias pulse level");
return DA7219_AAD_MICBIAS_PULSE_LVL_OFF;
}
}
static enum da7219_aad_btn_cfg
da7219_aad_fw_btn_cfg(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 2:
return DA7219_AAD_BTN_CFG_2MS;
case 5:
return DA7219_AAD_BTN_CFG_5MS;
case 10:
return DA7219_AAD_BTN_CFG_10MS;
case 50:
return DA7219_AAD_BTN_CFG_50MS;
case 100:
return DA7219_AAD_BTN_CFG_100MS;
case 200:
return DA7219_AAD_BTN_CFG_200MS;
case 500:
return DA7219_AAD_BTN_CFG_500MS;
default:
dev_warn(codec->dev, "Invalid button config");
return DA7219_AAD_BTN_CFG_10MS;
}
}
static enum da7219_aad_mic_det_thr
da7219_aad_fw_mic_det_thr(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 200:
return DA7219_AAD_MIC_DET_THR_200_OHMS;
case 500:
return DA7219_AAD_MIC_DET_THR_500_OHMS;
case 750:
return DA7219_AAD_MIC_DET_THR_750_OHMS;
case 1000:
return DA7219_AAD_MIC_DET_THR_1000_OHMS;
default:
dev_warn(codec->dev, "Invalid mic detect threshold");
return DA7219_AAD_MIC_DET_THR_500_OHMS;
}
}
static enum da7219_aad_jack_ins_deb
da7219_aad_fw_jack_ins_deb(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 5:
return DA7219_AAD_JACK_INS_DEB_5MS;
case 10:
return DA7219_AAD_JACK_INS_DEB_10MS;
case 20:
return DA7219_AAD_JACK_INS_DEB_20MS;
case 50:
return DA7219_AAD_JACK_INS_DEB_50MS;
case 100:
return DA7219_AAD_JACK_INS_DEB_100MS;
case 200:
return DA7219_AAD_JACK_INS_DEB_200MS;
case 500:
return DA7219_AAD_JACK_INS_DEB_500MS;
case 1000:
return DA7219_AAD_JACK_INS_DEB_1S;
default:
dev_warn(codec->dev, "Invalid jack insert debounce");
return DA7219_AAD_JACK_INS_DEB_20MS;
}
}
static enum da7219_aad_jack_det_rate
da7219_aad_fw_jack_det_rate(struct snd_soc_codec *codec, const char *str)
{
if (!strcmp(str, "32ms_64ms")) {
return DA7219_AAD_JACK_DET_RATE_32_64MS;
} else if (!strcmp(str, "64ms_128ms")) {
return DA7219_AAD_JACK_DET_RATE_64_128MS;
} else if (!strcmp(str, "128ms_256ms")) {
return DA7219_AAD_JACK_DET_RATE_128_256MS;
} else if (!strcmp(str, "256ms_512ms")) {
return DA7219_AAD_JACK_DET_RATE_256_512MS;
} else {
dev_warn(codec->dev, "Invalid jack detect rate");
return DA7219_AAD_JACK_DET_RATE_256_512MS;
}
}
static enum da7219_aad_jack_rem_deb
da7219_aad_fw_jack_rem_deb(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 1:
return DA7219_AAD_JACK_REM_DEB_1MS;
case 5:
return DA7219_AAD_JACK_REM_DEB_5MS;
case 10:
return DA7219_AAD_JACK_REM_DEB_10MS;
case 20:
return DA7219_AAD_JACK_REM_DEB_20MS;
default:
dev_warn(codec->dev, "Invalid jack removal debounce");
return DA7219_AAD_JACK_REM_DEB_1MS;
}
}
static enum da7219_aad_btn_avg
da7219_aad_fw_btn_avg(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 1:
return DA7219_AAD_BTN_AVG_1;
case 2:
return DA7219_AAD_BTN_AVG_2;
case 4:
return DA7219_AAD_BTN_AVG_4;
case 8:
return DA7219_AAD_BTN_AVG_8;
default:
dev_warn(codec->dev, "Invalid button average value");
return DA7219_AAD_BTN_AVG_2;
}
}
static enum da7219_aad_adc_1bit_rpt
da7219_aad_fw_adc_1bit_rpt(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 1:
return DA7219_AAD_ADC_1BIT_RPT_1;
case 2:
return DA7219_AAD_ADC_1BIT_RPT_2;
case 4:
return DA7219_AAD_ADC_1BIT_RPT_4;
case 8:
return DA7219_AAD_ADC_1BIT_RPT_8;
default:
dev_warn(codec->dev, "Invalid ADC 1-bit repeat value");
return DA7219_AAD_ADC_1BIT_RPT_1;
}
}
static struct da7219_aad_pdata *da7219_aad_fw_to_pdata(struct snd_soc_codec *codec)
{
struct device *dev = codec->dev;
struct i2c_client *i2c = to_i2c_client(dev);
struct fwnode_handle *aad_np;
struct da7219_aad_pdata *aad_pdata;
const char *fw_str;
u32 fw_val32;
aad_np = device_get_named_child_node(dev, "da7219_aad");
if (!aad_np)
return NULL;
aad_pdata = devm_kzalloc(codec->dev, sizeof(*aad_pdata), GFP_KERNEL);
if (!aad_pdata)
return NULL;
aad_pdata->irq = i2c->irq;
if (fwnode_property_read_u32(aad_np, "dlg,micbias-pulse-lvl",
&fw_val32) >= 0)
aad_pdata->micbias_pulse_lvl =
da7219_aad_fw_micbias_pulse_lvl(codec, fw_val32);
else
aad_pdata->micbias_pulse_lvl = DA7219_AAD_MICBIAS_PULSE_LVL_OFF;
if (fwnode_property_read_u32(aad_np, "dlg,micbias-pulse-time",
&fw_val32) >= 0)
aad_pdata->micbias_pulse_time = fw_val32;
if (fwnode_property_read_u32(aad_np, "dlg,btn-cfg", &fw_val32) >= 0)
aad_pdata->btn_cfg = da7219_aad_fw_btn_cfg(codec, fw_val32);
else
aad_pdata->btn_cfg = DA7219_AAD_BTN_CFG_10MS;
if (fwnode_property_read_u32(aad_np, "dlg,mic-det-thr", &fw_val32) >= 0)
aad_pdata->mic_det_thr =
da7219_aad_fw_mic_det_thr(codec, fw_val32);
else
aad_pdata->mic_det_thr = DA7219_AAD_MIC_DET_THR_500_OHMS;
if (fwnode_property_read_u32(aad_np, "dlg,jack-ins-deb", &fw_val32) >= 0)
aad_pdata->jack_ins_deb =
da7219_aad_fw_jack_ins_deb(codec, fw_val32);
else
aad_pdata->jack_ins_deb = DA7219_AAD_JACK_INS_DEB_20MS;
if (!fwnode_property_read_string(aad_np, "dlg,jack-det-rate", &fw_str))
aad_pdata->jack_det_rate =
da7219_aad_fw_jack_det_rate(codec, fw_str);
else
aad_pdata->jack_det_rate = DA7219_AAD_JACK_DET_RATE_256_512MS;
if (fwnode_property_read_u32(aad_np, "dlg,jack-rem-deb", &fw_val32) >= 0)
aad_pdata->jack_rem_deb =
da7219_aad_fw_jack_rem_deb(codec, fw_val32);
else
aad_pdata->jack_rem_deb = DA7219_AAD_JACK_REM_DEB_1MS;
if (fwnode_property_read_u32(aad_np, "dlg,a-d-btn-thr", &fw_val32) >= 0)
aad_pdata->a_d_btn_thr = (u8) fw_val32;
else
aad_pdata->a_d_btn_thr = 0xA;
if (fwnode_property_read_u32(aad_np, "dlg,d-b-btn-thr", &fw_val32) >= 0)
aad_pdata->d_b_btn_thr = (u8) fw_val32;
else
aad_pdata->d_b_btn_thr = 0x16;
if (fwnode_property_read_u32(aad_np, "dlg,b-c-btn-thr", &fw_val32) >= 0)
aad_pdata->b_c_btn_thr = (u8) fw_val32;
else
aad_pdata->b_c_btn_thr = 0x21;
if (fwnode_property_read_u32(aad_np, "dlg,c-mic-btn-thr", &fw_val32) >= 0)
aad_pdata->c_mic_btn_thr = (u8) fw_val32;
else
aad_pdata->c_mic_btn_thr = 0x3E;
if (fwnode_property_read_u32(aad_np, "dlg,btn-avg", &fw_val32) >= 0)
aad_pdata->btn_avg = da7219_aad_fw_btn_avg(codec, fw_val32);
else
aad_pdata->btn_avg = DA7219_AAD_BTN_AVG_2;
if (fwnode_property_read_u32(aad_np, "dlg,adc-1bit-rpt", &fw_val32) >= 0)
aad_pdata->adc_1bit_rpt =
da7219_aad_fw_adc_1bit_rpt(codec, fw_val32);
else
aad_pdata->adc_1bit_rpt = DA7219_AAD_ADC_1BIT_RPT_1;
return aad_pdata;
}
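/*
 * The parser above maps firmware properties onto struct da7219_aad_pdata and
 * falls back to the defaults shown when a property is absent. As a purely
 * illustrative sketch, a board that bypasses DT/ACPI could supply equivalent
 * platform data directly; the initializer below is hypothetical board code
 * mirroring those same defaults and is not taken from any real board file.
 */
static struct da7219_aad_pdata my_board_aad_pdata = {
	.micbias_pulse_lvl = DA7219_AAD_MICBIAS_PULSE_LVL_OFF,
	.btn_cfg	   = DA7219_AAD_BTN_CFG_10MS,
	.mic_det_thr	   = DA7219_AAD_MIC_DET_THR_500_OHMS,
	.jack_ins_deb	   = DA7219_AAD_JACK_INS_DEB_20MS,
	.jack_det_rate	   = DA7219_AAD_JACK_DET_RATE_256_512MS,
	.jack_rem_deb	   = DA7219_AAD_JACK_REM_DEB_1MS,
	.a_d_btn_thr	   = 0xA,
	.d_b_btn_thr	   = 0x16,
	.b_c_btn_thr	   = 0x21,
	.c_mic_btn_thr	   = 0x3E,
	.btn_avg	   = DA7219_AAD_BTN_AVG_2,
	.adc_1bit_rpt	   = DA7219_AAD_ADC_1BIT_RPT_1,
};
/* Such a board would hand this in through struct da7219_pdata's aad_pdata
 * pointer, which da7219_aad_init() checks before falling back to the
 * DT/ACPI parser above.
 */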
static void da7219_aad_handle_pdata(struct snd_soc_codec *codec)
{
struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
struct da7219_aad_priv *da7219_aad = da7219->aad;
struct da7219_pdata *pdata = da7219->pdata;
if ((pdata) && (pdata->aad_pdata)) {
struct da7219_aad_pdata *aad_pdata = pdata->aad_pdata;
u8 cfg, mask;
da7219_aad->irq = aad_pdata->irq;
switch (aad_pdata->micbias_pulse_lvl) {
case DA7219_AAD_MICBIAS_PULSE_LVL_2_8V:
case DA7219_AAD_MICBIAS_PULSE_LVL_2_9V:
da7219_aad->micbias_pulse_lvl =
(aad_pdata->micbias_pulse_lvl <<
DA7219_MICBIAS1_LEVEL_SHIFT);
break;
default:
break;
}
da7219_aad->micbias_pulse_time = aad_pdata->micbias_pulse_time;
switch (aad_pdata->btn_cfg) {
case DA7219_AAD_BTN_CFG_2MS:
case DA7219_AAD_BTN_CFG_5MS:
case DA7219_AAD_BTN_CFG_10MS:
case DA7219_AAD_BTN_CFG_50MS:
case DA7219_AAD_BTN_CFG_100MS:
case DA7219_AAD_BTN_CFG_200MS:
case DA7219_AAD_BTN_CFG_500MS:
da7219_aad->btn_cfg = (aad_pdata->btn_cfg <<
DA7219_BUTTON_CONFIG_SHIFT);
}
cfg = 0;
mask = 0;
switch (aad_pdata->mic_det_thr) {
case DA7219_AAD_MIC_DET_THR_200_OHMS:
case DA7219_AAD_MIC_DET_THR_500_OHMS:
case DA7219_AAD_MIC_DET_THR_750_OHMS:
case DA7219_AAD_MIC_DET_THR_1000_OHMS:
cfg |= (aad_pdata->mic_det_thr <<
DA7219_MIC_DET_THRESH_SHIFT);
mask |= DA7219_MIC_DET_THRESH_MASK;
}
snd_soc_update_bits(codec, DA7219_ACCDET_CONFIG_1, mask, cfg);
cfg = 0;
mask = 0;
switch (aad_pdata->jack_ins_deb) {
case DA7219_AAD_JACK_INS_DEB_5MS:
case DA7219_AAD_JACK_INS_DEB_10MS:
case DA7219_AAD_JACK_INS_DEB_20MS:
case DA7219_AAD_JACK_INS_DEB_50MS:
case DA7219_AAD_JACK_INS_DEB_100MS:
case DA7219_AAD_JACK_INS_DEB_200MS:
case DA7219_AAD_JACK_INS_DEB_500MS:
case DA7219_AAD_JACK_INS_DEB_1S:
cfg |= (aad_pdata->jack_ins_deb <<
DA7219_JACKDET_DEBOUNCE_SHIFT);
mask |= DA7219_JACKDET_DEBOUNCE_MASK;
}
switch (aad_pdata->jack_det_rate) {
case DA7219_AAD_JACK_DET_RATE_32_64MS:
case DA7219_AAD_JACK_DET_RATE_64_128MS:
case DA7219_AAD_JACK_DET_RATE_128_256MS:
case DA7219_AAD_JACK_DET_RATE_256_512MS:
cfg |= (aad_pdata->jack_det_rate <<
DA7219_JACK_DETECT_RATE_SHIFT);
mask |= DA7219_JACK_DETECT_RATE_MASK;
}
switch (aad_pdata->jack_rem_deb) {
case DA7219_AAD_JACK_REM_DEB_1MS:
case DA7219_AAD_JACK_REM_DEB_5MS:
case DA7219_AAD_JACK_REM_DEB_10MS:
case DA7219_AAD_JACK_REM_DEB_20MS:
cfg |= (aad_pdata->jack_rem_deb <<
DA7219_JACKDET_REM_DEB_SHIFT);
mask |= DA7219_JACKDET_REM_DEB_MASK;
}
snd_soc_update_bits(codec, DA7219_ACCDET_CONFIG_2, mask, cfg);
snd_soc_write(codec, DA7219_ACCDET_CONFIG_3,
aad_pdata->a_d_btn_thr);
snd_soc_write(codec, DA7219_ACCDET_CONFIG_4,
aad_pdata->d_b_btn_thr);
snd_soc_write(codec, DA7219_ACCDET_CONFIG_5,
aad_pdata->b_c_btn_thr);
snd_soc_write(codec, DA7219_ACCDET_CONFIG_6,
aad_pdata->c_mic_btn_thr);
cfg = 0;
mask = 0;
switch (aad_pdata->btn_avg) {
case DA7219_AAD_BTN_AVG_1:
case DA7219_AAD_BTN_AVG_2:
case DA7219_AAD_BTN_AVG_4:
case DA7219_AAD_BTN_AVG_8:
cfg |= (aad_pdata->btn_avg <<
DA7219_BUTTON_AVERAGE_SHIFT);
mask |= DA7219_BUTTON_AVERAGE_MASK;
}
switch (aad_pdata->adc_1bit_rpt) {
case DA7219_AAD_ADC_1BIT_RPT_1:
case DA7219_AAD_ADC_1BIT_RPT_2:
case DA7219_AAD_ADC_1BIT_RPT_4:
case DA7219_AAD_ADC_1BIT_RPT_8:
cfg |= (aad_pdata->adc_1bit_rpt <<
DA7219_ADC_1_BIT_REPEAT_SHIFT);
mask |= DA7219_ADC_1_BIT_REPEAT_MASK;
}
snd_soc_update_bits(codec, DA7219_ACCDET_CONFIG_7, mask, cfg);
}
}
/*
* Suspend/Resume
*/
void da7219_aad_suspend(struct snd_soc_codec *codec)
{
struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
struct da7219_aad_priv *da7219_aad = da7219->aad;
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
u8 micbias_ctrl;
if (da7219_aad->jack) {
/* Disable jack detection during suspend */
snd_soc_update_bits(codec, DA7219_ACCDET_CONFIG_1,
DA7219_ACCDET_EN_MASK, 0);
/*
* If we have a 4-pole jack inserted, then micbias will be
* enabled. We can disable micbias here, and keep a note to
* re-enable it on resume. If jack removal occurred during
* suspend then this will be dealt with through the IRQ handler.
*/
if (da7219_aad->jack_inserted) {
micbias_ctrl = snd_soc_read(codec, DA7219_MICBIAS_CTRL);
if (micbias_ctrl & DA7219_MICBIAS1_EN_MASK) {
snd_soc_dapm_disable_pin(dapm, "Mic Bias");
snd_soc_dapm_sync(dapm);
da7219_aad->micbias_resume_enable = true;
}
}
}
}
void da7219_aad_resume(struct snd_soc_codec *codec)
{
struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
struct da7219_aad_priv *da7219_aad = da7219->aad;
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
if (da7219_aad->jack) {
/* Re-enable micbias if previously enabled for 4-pole jack */
if (da7219_aad->jack_inserted &&
da7219_aad->micbias_resume_enable) {
snd_soc_dapm_force_enable_pin(dapm, "Mic Bias");
snd_soc_dapm_sync(dapm);
da7219_aad->micbias_resume_enable = false;
}
/* Re-enable jack detection */
snd_soc_update_bits(codec, DA7219_ACCDET_CONFIG_1,
DA7219_ACCDET_EN_MASK,
DA7219_ACCDET_EN_MASK);
}
}
/*
* Init/Exit
*/
int da7219_aad_init(struct snd_soc_codec *codec)
{
struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
struct da7219_aad_priv *da7219_aad;
u8 mask[DA7219_AAD_IRQ_REG_MAX];
int ret;
da7219_aad = devm_kzalloc(codec->dev, sizeof(*da7219_aad), GFP_KERNEL);
if (!da7219_aad)
return -ENOMEM;
da7219->aad = da7219_aad;
da7219_aad->codec = codec;
/* Handle any DT/ACPI/platform data */
if (da7219->pdata && !da7219->pdata->aad_pdata)
da7219->pdata->aad_pdata = da7219_aad_fw_to_pdata(codec);
da7219_aad_handle_pdata(codec);
/* Disable button detection */
snd_soc_update_bits(codec, DA7219_ACCDET_CONFIG_1,
DA7219_BUTTON_CONFIG_MASK, 0);
INIT_WORK(&da7219_aad->btn_det_work, da7219_aad_btn_det_work);
INIT_WORK(&da7219_aad->hptest_work, da7219_aad_hptest_work);
ret = request_threaded_irq(da7219_aad->irq, NULL,
da7219_aad_irq_thread,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"da7219-aad", da7219_aad);
if (ret) {
dev_err(codec->dev, "Failed to request IRQ: %d\n", ret);
return ret;
}
/* Unmask AAD IRQs */
memset(mask, 0, DA7219_AAD_IRQ_REG_MAX);
regmap_bulk_write(da7219->regmap, DA7219_ACCDET_IRQ_MASK_A,
&mask, DA7219_AAD_IRQ_REG_MAX);
return 0;
}
EXPORT_SYMBOL_GPL(da7219_aad_init);
void da7219_aad_exit(struct snd_soc_codec *codec)
{
struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
struct da7219_aad_priv *da7219_aad = da7219->aad;
u8 mask[DA7219_AAD_IRQ_REG_MAX];
/* Mask off AAD IRQs */
memset(mask, DA7219_BYTE_MASK, DA7219_AAD_IRQ_REG_MAX);
regmap_bulk_write(da7219->regmap, DA7219_ACCDET_IRQ_MASK_A,
mask, DA7219_AAD_IRQ_REG_MAX);
free_irq(da7219_aad->irq, da7219_aad);
cancel_work_sync(&da7219_aad->btn_det_work);
cancel_work_sync(&da7219_aad->hptest_work);
}
EXPORT_SYMBOL_GPL(da7219_aad_exit);
MODULE_DESCRIPTION("ASoC DA7219 AAD Driver");
MODULE_AUTHOR("Adam Thomson <Adam.Thomson.Opensource@diasemi.com>");
MODULE_LICENSE("GPL");
| null | null | null | null | 79,165 |
834 | null | train_val | c536b6be1a72aefd632d5530106a67c516cb9f4b | 257,221 | openssl | 0 | https://github.com/openssl/openssl | 2016-09-22 23:12:38+01:00 |
/*
* Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the OpenSSL license (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
/* Internal ASN1 structures and functions: not for application use */
int asn1_utctime_to_tm(struct tm *tm, const ASN1_UTCTIME *d);
int asn1_generalizedtime_to_tm(struct tm *tm, const ASN1_GENERALIZEDTIME *d);
/* ASN1 scan context structure */
struct asn1_sctx_st {
/* The ASN1_ITEM associated with this field */
const ASN1_ITEM *it;
    /* The ASN1_TEMPLATE associated with this field, if any */
const ASN1_TEMPLATE *tt;
/* Various flags associated with field and context */
unsigned long flags;
/* If SEQUENCE OF or SET OF, field index */
int skidx;
/* ASN1 depth of field */
int depth;
/* Structure and field name */
const char *sname, *fname;
    /* If a primitive type, the type of the underlying field */
int prim_type;
/* The field value itself */
ASN1_VALUE **field;
/* Callback to pass information to */
int (*scan_cb) (ASN1_SCTX *ctx);
/* Context specific application data */
void *app_data;
} /* ASN1_SCTX */ ;
typedef struct mime_param_st MIME_PARAM;
DEFINE_STACK_OF(MIME_PARAM)
typedef struct mime_header_st MIME_HEADER;
DEFINE_STACK_OF(MIME_HEADER)
/* Month values for printing out times */
extern const char *_asn1_mon[12];
int asn1_get_choice_selector(ASN1_VALUE **pval, const ASN1_ITEM *it);
int asn1_set_choice_selector(ASN1_VALUE **pval, int value,
const ASN1_ITEM *it);
ASN1_VALUE **asn1_get_field_ptr(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt);
const ASN1_TEMPLATE *asn1_do_adb(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt,
int nullerr);
int asn1_do_lock(ASN1_VALUE **pval, int op, const ASN1_ITEM *it);
void asn1_enc_init(ASN1_VALUE **pval, const ASN1_ITEM *it);
void asn1_enc_free(ASN1_VALUE **pval, const ASN1_ITEM *it);
int asn1_enc_restore(int *len, unsigned char **out, ASN1_VALUE **pval,
const ASN1_ITEM *it);
int asn1_enc_save(ASN1_VALUE **pval, const unsigned char *in, int inlen,
const ASN1_ITEM *it);
void asn1_primitive_free(ASN1_VALUE **pval, const ASN1_ITEM *it);
void asn1_template_free(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt);
ASN1_OBJECT *c2i_ASN1_OBJECT(ASN1_OBJECT **a, const unsigned char **pp,
long length);
int i2c_ASN1_BIT_STRING(ASN1_BIT_STRING *a, unsigned char **pp);
ASN1_BIT_STRING *c2i_ASN1_BIT_STRING(ASN1_BIT_STRING **a,
const unsigned char **pp, long length);
int i2c_ASN1_INTEGER(ASN1_INTEGER *a, unsigned char **pp);
ASN1_INTEGER *c2i_ASN1_INTEGER(ASN1_INTEGER **a, const unsigned char **pp,
long length);
| null | null | null | null | 118,666 |
71,537 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 71,537 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SERVICES_RESOURCE_COORDINATOR_MEMORY_INSTRUMENTATION_GRAPH_PROCESSOR_H_
#define SERVICES_RESOURCE_COORDINATOR_MEMORY_INSTRUMENTATION_GRAPH_PROCESSOR_H_
#include <memory>
#include "base/process/process_handle.h"
#include "base/trace_event/process_memory_dump.h"
#include "services/resource_coordinator/memory_instrumentation/graph.h"
namespace memory_instrumentation {
class GraphProcessor {
public:
// This map does not own the pointers inside.
using MemoryDumpMap =
std::map<base::ProcessId, const base::trace_event::ProcessMemoryDump*>;
static std::unique_ptr<GlobalDumpGraph> CreateMemoryGraph(
const MemoryDumpMap& process_dumps);
static void RemoveWeakNodesFromGraph(GlobalDumpGraph* global_graph);
static void AddOverheadsAndPropogateEntries(GlobalDumpGraph* global_graph);
static void CalculateSizesForGraph(GlobalDumpGraph* global_graph);
static std::map<base::ProcessId, uint64_t> ComputeSharedFootprintFromGraph(
const GlobalDumpGraph& global_graph);
private:
friend class GraphProcessorTest;
static void CollectAllocatorDumps(
const base::trace_event::ProcessMemoryDump& source,
GlobalDumpGraph* global_graph,
GlobalDumpGraph::Process* process_graph);
static void AddEdges(const base::trace_event::ProcessMemoryDump& source,
GlobalDumpGraph* global_graph);
static void MarkImplicitWeakParentsRecursively(GlobalDumpGraph::Node* node);
static void MarkWeakOwnersAndChildrenRecursively(
GlobalDumpGraph::Node* node,
std::set<const GlobalDumpGraph::Node*>* nodes);
static void RemoveWeakNodesRecursively(GlobalDumpGraph::Node* parent);
static void AssignTracingOverhead(base::StringPiece allocator,
GlobalDumpGraph* global_graph,
GlobalDumpGraph::Process* process);
static GlobalDumpGraph::Node::Entry AggregateNumericWithNameForNode(
GlobalDumpGraph::Node* node,
base::StringPiece name);
static void AggregateNumericsRecursively(GlobalDumpGraph::Node* node);
static void PropagateNumericsAndDiagnosticsRecursively(
GlobalDumpGraph::Node* node);
static base::Optional<uint64_t> AggregateSizeForDescendantNode(
GlobalDumpGraph::Node* root,
GlobalDumpGraph::Node* descendant);
static void CalculateSizeForNode(GlobalDumpGraph::Node* node);
/**
* Calculate not-owned and not-owning sub-sizes of a memory allocator dump
* from its children's (sub-)sizes.
*
* Not-owned sub-size refers to the aggregated memory of all children which
* is not owned by other MADs. Conversely, not-owning sub-size is the
* aggregated memory of all children which do not own another MAD. The
* diagram below illustrates these two concepts:
*
* ROOT 1 ROOT 2
* size: 4 size: 5
* not-owned sub-size: 4 not-owned sub-size: 1 (!)
* not-owning sub-size: 0 (!) not-owning sub-size: 5
*
* ^ ^
* | |
*
* PARENT 1 ===== owns =====> PARENT 2
* size: 4 size: 5
* not-owned sub-size: 4 not-owned sub-size: 5
* not-owning sub-size: 4 not-owning sub-size: 5
*
* ^ ^
* | |
*
* CHILD 1 CHILD 2
* size [given]: 4 size [given]: 5
* not-owned sub-size: 4 not-owned sub-size: 5
* not-owning sub-size: 4 not-owning sub-size: 5
*
* This method assumes that (1) the size of the dump, its children, and its
* owners [see calculateSizes()] and (2) the not-owned and not-owning
* sub-sizes of both the children and owners of the dump have already been
* calculated [depth-first post-order traversal].
*/
static void CalculateDumpSubSizes(GlobalDumpGraph::Node* node);
/**
* Calculate owned and owning coefficients of a memory allocator dump and
* its owners.
*
* The owning coefficient refers to the proportion of a dump's not-owning
* sub-size which is attributed to the dump (only relevant to owning MADs).
* Conversely, the owned coefficient is the proportion of a dump's
* not-owned sub-size, which is attributed to it (only relevant to owned
* MADs).
*
* The not-owned size of the owned dump is split among its owners in the
* order of the ownership importance as demonstrated by the following
* example:
*
* memory allocator dumps
* OWNED OWNER1 OWNER2 OWNER3 OWNER4
* not-owned sub-size [given] 10 - - - -
* not-owning sub-size [given] - 6 7 5 8
* importance [given] - 2 2 1 0
* attributed not-owned sub-size 2 - - - -
* attributed not-owning sub-size - 3 4 0 1
* owned coefficient 2/10 - - - -
* owning coefficient - 3/6 4/7 0/5 1/8
*
* Explanation: Firstly, 6 bytes are split equally among OWNER1 and OWNER2
* (highest importance). OWNER2 owns one more byte, so its attributed
* not-owning sub-size is 6/2 + 1 = 4 bytes. OWNER3 is attributed no size
* because it is smaller than the owners with higher priority. However,
* OWNER4 is larger, so it's attributed the difference 8 - 7 = 1 byte.
* Finally, 2 bytes remain unattributed and are hence kept in the OWNED
* dump as attributed not-owned sub-size. The coefficients are then
* directly calculated as fractions of the sub-sizes and corresponding
* attributed sub-sizes.
*
* Note that we always assume that all ownerships of a dump overlap (e.g.
* OWNER3 is subsumed by both OWNER1 and OWNER2). Hence, the table could
* be alternatively represented as follows:
*
* owned memory range
* 0 1 2 3 4 5 6 7 8 9 10
* Priority 2 | OWNER1 + OWNER2 (split) | OWNER2 |
* Priority 1 | (already attributed) |
* Priority 0 | - - - (already attributed) - - - | OWNER4 |
* Remainder | - - - - - (already attributed) - - - - - - | OWNED |
*
* This method assumes that (1) the size of the dump [see calculateSizes()]
* and (2) the not-owned size of the dump and not-owning sub-sizes of its
* owners [see the first step of calculateEffectiveSizes()] have already
* been calculated. Note that the method doesn't make any assumptions about
* the order in which dumps are visited.
*/
static void CalculateDumpOwnershipCoefficient(GlobalDumpGraph::Node* node);
/**
* Calculate cumulative owned and owning coefficients of a memory allocator
* dump from its (non-cumulative) owned and owning coefficients and the
* cumulative coefficients of its parent and/or owned dump.
*
* The cumulative coefficients represent the total effect of all
* (non-strict) ancestor ownerships on a memory allocator dump. The
* cumulative owned coefficient of a MAD can be calculated simply as:
*
* cumulativeOwnedC(M) = ownedC(M) * cumulativeOwnedC(parent(M))
*
* This reflects the assumption that if a parent of a child MAD is
* (partially) owned, then the parent's owner also indirectly owns (a part
* of) the child MAD.
*
* The cumulative owning coefficient of a MAD depends on whether the MAD
* owns another dump:
*
* [if M doesn't own another MAD]
* / cumulativeOwningC(parent(M))
* cumulativeOwningC(M) =
* \ [if M owns another MAD]
* owningC(M) * cumulativeOwningC(owned(M))
*
* The reasoning behind the first case is similar to the one for cumulative
* owned coefficient above. The only difference is that we don't need to
* include the dump's (non-cumulative) owning coefficient because it is
* implicitly 1.
*
* The formula for the second case is derived as follows: Since the MAD
* owns another dump, its memory is not included in its parent's not-owning
* sub-size and hence shouldn't be affected by the parent's corresponding
* cumulative coefficient. Instead, the MAD indirectly owns everything
* owned by its owned dump (and so it should be affected by the
* corresponding coefficient).
*
* Note that undefined coefficients (and coefficients of non-existent
* dumps) are implicitly assumed to be 1.
*
* This method assumes that (1) the size of the dump [see calculateSizes()],
* (2) the (non-cumulative) owned and owning coefficients of the dump [see
* the second step of calculateEffectiveSizes()], and (3) the cumulative
* coefficients of the dump's parent and owned MADs (if present)
* [depth-first pre-order traversal] have already been calculated.
*/
static void CalculateDumpCumulativeOwnershipCoefficient(
GlobalDumpGraph::Node* node);
/**
* Calculate the effective size of a memory allocator dump.
*
* In order to simplify the (already complex) calculation, we use the fact
* that effective size is cumulative (unlike regular size), i.e. the
* effective size of a non-leaf node is equal to the sum of effective sizes
* of its children. The effective size of a leaf MAD is calculated as:
*
* effectiveSize(M) = size(M) * cumulativeOwningC(M) * cumulativeOwnedC(M)
*
* This method assumes that (1) the size of the dump and its children [see
* calculateSizes()] and (2) the cumulative owning and owned coefficients
* of the dump (if it's a leaf node) [see the third step of
* calculateEffectiveSizes()] or the effective sizes of its children (if
* it's a non-leaf node) [depth-first post-order traversal] have already
* been calculated.
*/
static void CalculateDumpEffectiveSize(GlobalDumpGraph::Node* node);
};
} // namespace memory_instrumentation
#endif
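// The doc comments above define the sub-size, ownership-coefficient and
// effective-size calculations, including a worked table in which OWNER2 is
// attributed 4 of its 7-byte not-owning sub-size and OWNED keeps 2 of its
// 10-byte not-owned sub-size, plus the formula
// effectiveSize(M) = size(M) * cumulativeOwningC(M) * cumulativeOwnedC(M).
// The standalone C snippet below merely replays that arithmetic as a sanity
// check; it is illustrative only, not Chromium code, and it treats the quoted
// sub-sizes as the dumps' sizes with all parent coefficients assumed to be 1.
#include <stdio.h>

/* effectiveSize(M) = size(M) * cumulativeOwningC(M) * cumulativeOwnedC(M) */
static double effective_size(double size, double cum_owning_c,
                             double cum_owned_c)
{
    return size * cum_owning_c * cum_owned_c;
}

int main(void)
{
    double owner2_owning_c = 4.0 / 7.0;   /* attributed 4 of 7 bytes */
    double owned_owned_c   = 2.0 / 10.0;  /* keeps 2 of 10 bytes     */

    printf("OWNER2 effective size: %.2f\n",
           effective_size(7.0, owner2_owning_c, 1.0));   /* prints 4.00 */
    printf("OWNED effective size:  %.2f\n",
           effective_size(10.0, 1.0, owned_owned_c));    /* prints 2.00 */
    return 0;
}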
| null | null | null | null | 68,400 |
10,861 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 10,861 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "mojo/edk/system/ports/name.h"
namespace mojo {
namespace edk {
namespace ports {
const PortName kInvalidPortName = {0, 0};
const NodeName kInvalidNodeName = {0, 0};
std::ostream& operator<<(std::ostream& stream, const Name& name) {
std::ios::fmtflags flags(stream.flags());
stream << std::hex << std::uppercase << name.v1;
if (name.v2 != 0)
stream << '.' << name.v2;
stream.flags(flags);
return stream;
}
} // namespace ports
} // namespace edk
} // namespace mojo
| null | null | null | null | 7,724 |
65,813 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 65,813 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_TRANSLATE_ANDROID_TRANSLATE_UTILS_H_
#define CHROME_BROWSER_TRANSLATE_ANDROID_TRANSLATE_UTILS_H_
#include "base/android/jni_android.h"
#include "base/android/scoped_java_ref.h"
namespace translate {
class TranslateInfoBarDelegate;
}
class TranslateUtils {
public:
// A Java counterpart will be generated for this enum.
// GENERATED_JAVA_ENUM_PACKAGE: org.chromium.chrome.browser.infobar
// GENERATED_JAVA_PREFIX_TO_STRIP:OPTION_
enum TranslateOption {
OPTION_SOURCE_CODE,
OPTION_TARGET_CODE,
OPTION_ALWAYS_TRANSLATE,
OPTION_NEVER_TRANSLATE,
OPTION_NEVER_TRANSLATE_SITE
};
// A Java counterpart will be generated for this enum.
// GENERATED_JAVA_ENUM_PACKAGE: org.chromium.chrome.browser.infobar
// GENERATED_JAVA_PREFIX_TO_STRIP:TYPE_
enum TranslateSnackbarType {
TYPE_NONE,
TYPE_ALWAYS_TRANSLATE,
TYPE_NEVER_TRANSLATE,
TYPE_NEVER_TRANSLATE_SITE
};
static base::android::ScopedJavaLocalRef<jobjectArray> GetJavaLanguages(
JNIEnv* env,
translate::TranslateInfoBarDelegate* delegate);
static base::android::ScopedJavaLocalRef<jobjectArray> GetJavaLanguageCodes(
JNIEnv* env,
translate::TranslateInfoBarDelegate* delegate);
static base::android::ScopedJavaLocalRef<jintArray> GetJavaLanguageHashCodes(
JNIEnv* env,
translate::TranslateInfoBarDelegate* delegate);
};
#endif // CHROME_BROWSER_TRANSLATE_ANDROID_TRANSLATE_UTILS_H_
| null | null | null | null | 62,676 |
24,158 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 24,158 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "build/build_config.h"
#include "content/browser/download/save_file_manager.h"
#include "base/bind.h"
#include "base/files/file_util.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/strings/string_util.h"
#include "components/download/public/common/download_task_runner.h"
#include "content/browser/child_process_security_policy_impl.h"
#include "content/browser/download/save_file.h"
#include "content/browser/download/save_package.h"
#include "content/browser/renderer_host/render_view_host_impl.h"
#include "content/browser/web_contents/web_contents_impl.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/browser/render_frame_host.h"
#include "content/public/browser/resource_context.h"
#include "content/public/browser/storage_partition.h"
#include "content/public/common/previews_state.h"
#include "net/base/io_buffer.h"
#include "net/base/load_flags.h"
#include "net/traffic_annotation/network_traffic_annotation.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_job_factory.h"
#include "services/network/public/cpp/simple_url_loader.h"
#include "services/network/public/cpp/simple_url_loader_stream_consumer.h"
#include "services/network/public/mojom/url_loader_factory.mojom.h"
#include "url/gurl.h"
namespace content {
namespace {
// Pointer to the singleton SaveFileManager instance.
static SaveFileManager* g_save_file_manager = nullptr;
} // namespace
class SaveFileManager::SimpleURLLoaderHelper
: public network::SimpleURLLoaderStreamConsumer {
public:
static std::unique_ptr<SimpleURLLoaderHelper> CreateAndStartDownload(
std::unique_ptr<network::ResourceRequest> resource_request,
SaveItemId save_item_id,
SavePackageId save_package_id,
int render_process_id,
int render_frame_routing_id,
const net::NetworkTrafficAnnotationTag& annotation_tag,
network::mojom::URLLoaderFactory* url_loader_factory,
SaveFileManager* save_file_manager) {
return std::unique_ptr<SimpleURLLoaderHelper>(new SimpleURLLoaderHelper(
std::move(resource_request), save_item_id, save_package_id,
render_process_id, render_frame_routing_id, annotation_tag,
url_loader_factory, save_file_manager));
}
~SimpleURLLoaderHelper() override = default;
private:
SimpleURLLoaderHelper(
std::unique_ptr<network::ResourceRequest> resource_request,
SaveItemId save_item_id,
SavePackageId save_package_id,
int render_process_id,
int render_frame_routing_id,
const net::NetworkTrafficAnnotationTag& annotation_tag,
network::mojom::URLLoaderFactory* url_loader_factory,
SaveFileManager* save_file_manager)
: save_file_manager_(save_file_manager),
save_item_id_(save_item_id),
save_package_id_(save_package_id) {
GURL url = resource_request->url;
url_loader_ = network::SimpleURLLoader::Create(std::move(resource_request),
annotation_tag);
// We can use Unretained below as |url_loader_| is owned by |this|, so the
// callback won't be invoked if |this| gets deleted.
url_loader_->SetOnResponseStartedCallback(base::BindOnce(
&SimpleURLLoaderHelper::OnResponseStarted, base::Unretained(this), url,
render_process_id, render_frame_routing_id));
url_loader_->DownloadAsStream(url_loader_factory, this);
}
void OnResponseStarted(GURL url,
int render_process_id,
int render_frame_routing_id,
const GURL& final_url,
const network::ResourceResponseHead& response_head) {
std::string content_disposition;
if (response_head.headers) {
response_head.headers->GetNormalizedHeader("Content-Disposition",
&content_disposition);
}
auto info = std::make_unique<SaveFileCreateInfo>(
url, final_url, save_item_id_, save_package_id_, render_process_id,
render_frame_routing_id, content_disposition);
download::GetDownloadTaskRunner()->PostTask(
FROM_HERE, base::BindOnce(&SaveFileManager::StartSave,
save_file_manager_, std::move(info)));
}
// network::SimpleURLLoaderStreamConsumer implementation:
void OnDataReceived(base::StringPiece string_piece,
base::OnceClosure resume) override {
// TODO(jcivelli): we should make threading sane and avoid copying
// |string_piece| bytes.
download::GetDownloadTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(&SaveFileManager::UpdateSaveProgress, save_file_manager_,
save_item_id_, string_piece.as_string()));
std::move(resume).Run();
}
void OnComplete(bool success) override {
download::GetDownloadTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(&SaveFileManager::SaveFinished, save_file_manager_,
save_item_id_, save_package_id_, success));
}
void OnRetry(base::OnceClosure start_retry) override {
// Retries are not enabled.
NOTREACHED();
}
SaveFileManager* save_file_manager_;
SaveItemId save_item_id_;
SavePackageId save_package_id_;
std::unique_ptr<network::SimpleURLLoader> url_loader_;
DISALLOW_COPY_AND_ASSIGN(SimpleURLLoaderHelper);
};
SaveFileManager::SaveFileManager() {
DCHECK(g_save_file_manager == nullptr);
g_save_file_manager = this;
}
SaveFileManager::~SaveFileManager() {
// Check for clean shutdown.
DCHECK(save_file_map_.empty());
DCHECK(g_save_file_manager);
g_save_file_manager = nullptr;
}
// static
SaveFileManager* SaveFileManager::Get() {
return g_save_file_manager;
}
// Called during the browser shutdown process to clean up any state (open files,
// timers) that live on the saving thread (file thread).
void SaveFileManager::Shutdown() {
download::GetDownloadTaskRunner()->PostTask(
FROM_HERE, base::BindOnce(&SaveFileManager::OnShutdown, this));
}
// Stop file thread operations.
void SaveFileManager::OnShutdown() {
DCHECK(download::GetDownloadTaskRunner()->RunsTasksInCurrentSequence());
save_file_map_.clear();
}
SaveFile* SaveFileManager::LookupSaveFile(SaveItemId save_item_id) {
DCHECK(download::GetDownloadTaskRunner()->RunsTasksInCurrentSequence());
auto it = save_file_map_.find(save_item_id);
return it == save_file_map_.end() ? nullptr : it->second.get();
}
// Look up a SavePackage according to a save id.
SavePackage* SaveFileManager::LookupPackage(SaveItemId save_item_id) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
auto it = packages_.find(save_item_id);
if (it != packages_.end())
return it->second;
return nullptr;
}
// Call from SavePackage for starting a saving job
void SaveFileManager::SaveURL(SaveItemId save_item_id,
const GURL& url,
const Referrer& referrer,
int render_process_host_id,
int render_view_routing_id,
int render_frame_routing_id,
SaveFileCreateInfo::SaveFileSource save_source,
const base::FilePath& file_full_path,
ResourceContext* context,
StoragePartition* storage_partition,
SavePackage* save_package) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
// Insert started saving job to tracking list.
DCHECK(packages_.find(save_item_id) == packages_.end());
packages_[save_item_id] = save_package;
// Register a saving job.
if (save_source == SaveFileCreateInfo::SAVE_FILE_FROM_NET) {
DCHECK(url.is_valid());
// Starts the actual download.
if (!ChildProcessSecurityPolicyImpl::GetInstance()->CanRequestURL(
render_process_host_id, url)) {
download::GetDownloadTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(&SaveFileManager::SaveFinished, this, save_item_id,
save_package->id(), /*success=*/false));
return;
}
net::NetworkTrafficAnnotationTag traffic_annotation =
net::DefineNetworkTrafficAnnotation("save_file_manager", R"(
semantics {
sender: "Save File"
description: "Saving url to local file."
trigger:
"User clicks on 'Save link as...' context menu command to save a "
"link."
data: "None."
destination: WEBSITE
}
policy {
cookies_allowed: YES
cookies_store: "user"
setting:
"This feature cannot be disable by settings. The request is made "
"only if user chooses 'Save link as...' in context menu."
policy_exception_justification: "Not implemented."
})");
auto request = std::make_unique<network::ResourceRequest>();
request->url = url;
request->referrer = referrer.url;
request->priority = net::DEFAULT_PRIORITY;
request->load_flags = net::LOAD_SKIP_CACHE_VALIDATION;
url_loader_helpers_[save_item_id] =
SimpleURLLoaderHelper::CreateAndStartDownload(
std::move(request), save_item_id, save_package->id(),
render_process_host_id, render_frame_routing_id, traffic_annotation,
storage_partition->GetURLLoaderFactoryForBrowserProcess().get(),
this);
} else {
// We manually start the save job.
auto info = std::make_unique<SaveFileCreateInfo>(
file_full_path, url, save_item_id, save_package->id(),
render_process_host_id, render_frame_routing_id, save_source);
    // Since the data will come from the render process, we need to start
    // this kind of save job ourselves.
download::GetDownloadTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(&SaveFileManager::StartSave, this, std::move(info)));
}
}
// Utility function for look up table maintenance, called on the UI thread.
// A manager may have multiple save page job (SavePackage) in progress,
// so we just look up the save id and remove it from the tracking table.
void SaveFileManager::RemoveSaveFile(SaveItemId save_item_id,
SavePackage* save_package) {
DCHECK(save_package);
DCHECK_CURRENTLY_ON(BrowserThread::UI);
// A save page job (SavePackage) can only have one manager,
// so remove it if it exists.
auto it = packages_.find(save_item_id);
if (it != packages_.end())
packages_.erase(it);
}
// Static
SavePackage* SaveFileManager::GetSavePackageFromRenderIds(
int render_process_id,
int render_frame_routing_id) {
RenderFrameHost* render_frame_host =
RenderFrameHost::FromID(render_process_id, render_frame_routing_id);
if (!render_frame_host)
return nullptr;
WebContentsImpl* web_contents =
static_cast<WebContentsImpl*>(WebContents::FromRenderFrameHost(
render_frame_host));
if (!web_contents)
return nullptr;
return web_contents->save_package();
}
void SaveFileManager::DeleteDirectoryOrFile(const base::FilePath& full_path,
bool is_dir) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
download::GetDownloadTaskRunner()->PostTask(
FROM_HERE, base::BindOnce(&SaveFileManager::OnDeleteDirectoryOrFile, this,
full_path, is_dir));
}
void SaveFileManager::SendCancelRequest(SaveItemId save_item_id) {
// Cancel the request which has specific save id.
DCHECK(!save_item_id.is_null());
download::GetDownloadTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(&SaveFileManager::CancelSave, this, save_item_id));
}
// Notifications sent from the IO thread and run on the file thread:
// The IO thread created |info|, but the file thread (this method) uses it
// to create a SaveFile which will hold and finally destroy |info|. It then
// passes |info| to the UI thread for reporting saving status.
void SaveFileManager::StartSave(std::unique_ptr<SaveFileCreateInfo> info) {
DCHECK(download::GetDownloadTaskRunner()->RunsTasksInCurrentSequence());
DCHECK(info);
// No need to calculate hash.
auto save_file =
std::make_unique<SaveFile>(std::move(info), /*calculate_hash=*/false);
// TODO(phajdan.jr): We should check the return value and handle errors here.
save_file->Initialize();
const SaveFileCreateInfo& save_file_create_info = save_file->create_info();
DCHECK(!LookupSaveFile(save_file->save_item_id()));
save_file_map_[save_file->save_item_id()] = std::move(save_file);
BrowserThread::PostTask(BrowserThread::UI, FROM_HERE,
base::BindOnce(&SaveFileManager::OnStartSave, this,
save_file_create_info));
}
// We do forward an update to the UI thread here, since we do not use a timer
// to update the UI. If the user has canceled the saving action (in the UI
// thread), we may receive a few more updates before the IO thread gets the
// cancel message. We just delete the data since the SaveFile has been deleted.
void SaveFileManager::UpdateSaveProgress(SaveItemId save_item_id,
const std::string& data) {
DCHECK(download::GetDownloadTaskRunner()->RunsTasksInCurrentSequence());
SaveFile* save_file = LookupSaveFile(save_item_id);
if (save_file) {
DCHECK(save_file->InProgress());
download::DownloadInterruptReason reason =
save_file->AppendDataToFile(data.data(), data.size());
BrowserThread::PostTask(
BrowserThread::UI, FROM_HERE,
base::BindOnce(&SaveFileManager::OnUpdateSaveProgress, this,
save_file->save_item_id(), save_file->BytesSoFar(),
reason == download::DOWNLOAD_INTERRUPT_REASON_NONE));
}
}
// The IO thread will call this when saving is completed or an error occurred
// while fetching data. We forward the message to OnSaveFinished on the UI thread.
void SaveFileManager::SaveFinished(SaveItemId save_item_id,
SavePackageId save_package_id,
bool is_success) {
DVLOG(20) << __func__ << "() save_item_id = " << save_item_id
<< " save_package_id = " << save_package_id
<< " is_success = " << is_success;
DCHECK(download::GetDownloadTaskRunner()->RunsTasksInCurrentSequence());
int64_t bytes_so_far = 0;
SaveFile* save_file = LookupSaveFile(save_item_id);
// Note that we might not have a save_file: canceling starts on the download
// thread but the load is canceled on the UI thread. The request might finish
  // while thread hopping.
if (save_file) {
DCHECK(save_file->InProgress());
DVLOG(20) << __func__ << "() save_file = " << save_file->DebugString();
bytes_so_far = save_file->BytesSoFar();
save_file->Finish();
save_file->Detach();
}
BrowserThread::PostTask(
BrowserThread::UI, FROM_HERE,
base::BindOnce(&SaveFileManager::OnSaveFinished, this, save_item_id,
bytes_so_far, is_success));
}
// Notifications sent from the file thread and run on the UI thread.
void SaveFileManager::OnStartSave(const SaveFileCreateInfo& info) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
SavePackage* save_package = GetSavePackageFromRenderIds(
info.render_process_id, info.render_frame_routing_id);
if (!save_package) {
// Cancel this request.
SendCancelRequest(info.save_item_id);
return;
}
// Forward this message to SavePackage.
save_package->StartSave(&info);
}
void SaveFileManager::OnUpdateSaveProgress(SaveItemId save_item_id,
int64_t bytes_so_far,
bool write_success) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
SavePackage* package = LookupPackage(save_item_id);
if (package)
package->UpdateSaveProgress(save_item_id, bytes_so_far, write_success);
else
SendCancelRequest(save_item_id);
}
void SaveFileManager::OnSaveFinished(SaveItemId save_item_id,
int64_t bytes_so_far,
bool is_success) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
ClearURLLoader(save_item_id);
SavePackage* package = LookupPackage(save_item_id);
if (package)
package->SaveFinished(save_item_id, bytes_so_far, is_success);
}
// Notifications sent from the UI thread and run on the file thread.
// This method will be sent via a user action, or shutdown on the UI thread,
// and run on the file thread. We don't post a message back for cancels,
// but we do forward the cancel to the IO thread. Since this message has been
// sent from the UI thread, the saving job may have already completed and
// won't exist in our map.
void SaveFileManager::CancelSave(SaveItemId save_item_id) {
DCHECK(download::GetDownloadTaskRunner()->RunsTasksInCurrentSequence());
auto it = save_file_map_.find(save_item_id);
if (it != save_file_map_.end()) {
std::unique_ptr<SaveFile> save_file = std::move(it->second);
if (!save_file->InProgress()) {
// We've won a race with the UI thread--we finished the file before
// the UI thread cancelled it on us. Unfortunately, in this situation
// the cancel wins, so we need to delete the now detached file.
base::DeleteFile(save_file->FullPath(), false);
} else if (save_file->save_source() ==
SaveFileCreateInfo::SAVE_FILE_FROM_NET) {
BrowserThread::PostTask(
BrowserThread::UI, FROM_HERE,
base::BindOnce(&SaveFileManager::ClearURLLoader, this, save_item_id));
}
    // Whether the save file is complete or not, just delete it. This
// will delete the underlying file if InProgress() is true.
save_file_map_.erase(it);
}
}
void SaveFileManager::ClearURLLoader(SaveItemId save_item_id) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
auto url_loader_iter = url_loader_helpers_.find(save_item_id);
if (url_loader_iter != url_loader_helpers_.end())
url_loader_helpers_.erase(url_loader_iter);
}
void SaveFileManager::OnDeleteDirectoryOrFile(const base::FilePath& full_path,
bool is_dir) {
DCHECK(download::GetDownloadTaskRunner()->RunsTasksInCurrentSequence());
DCHECK(!full_path.empty());
base::DeleteFile(full_path, is_dir);
}
void SaveFileManager::RenameAllFiles(const FinalNamesMap& final_names,
const base::FilePath& resource_dir,
int render_process_id,
int render_frame_routing_id,
SavePackageId save_package_id) {
DCHECK(download::GetDownloadTaskRunner()->RunsTasksInCurrentSequence());
if (!resource_dir.empty() && !base::PathExists(resource_dir))
base::CreateDirectory(resource_dir);
for (const auto& i : final_names) {
SaveItemId save_item_id = i.first;
const base::FilePath& final_name = i.second;
auto it = save_file_map_.find(save_item_id);
if (it != save_file_map_.end()) {
SaveFile* save_file = it->second.get();
DCHECK(!save_file->InProgress());
save_file->Rename(final_name);
save_file_map_.erase(it);
}
}
BrowserThread::PostTask(
BrowserThread::UI, FROM_HERE,
base::BindOnce(&SaveFileManager::OnFinishSavePageJob, this,
render_process_id, render_frame_routing_id,
save_package_id));
}
void SaveFileManager::OnFinishSavePageJob(int render_process_id,
int render_frame_routing_id,
SavePackageId save_package_id) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
SavePackage* save_package =
GetSavePackageFromRenderIds(render_process_id, render_frame_routing_id);
if (save_package && save_package->id() == save_package_id)
save_package->Finish();
}
void SaveFileManager::RemoveSavedFileFromFileMap(
const std::vector<SaveItemId>& save_item_ids) {
DCHECK(download::GetDownloadTaskRunner()->RunsTasksInCurrentSequence());
for (const SaveItemId save_item_id : save_item_ids) {
auto it = save_file_map_.find(save_item_id);
if (it != save_file_map_.end()) {
SaveFile* save_file = it->second.get();
DCHECK(!save_file->InProgress());
base::DeleteFile(save_file->FullPath(), false);
save_file_map_.erase(it);
}
}
}
} // namespace content
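// The cancellation comments in SaveFinished() and CancelSave() above describe
// a small cross-thread protocol: the finishing path detaches the file but
// leaves its map entry in place, and a later cancel always wins, deleting the
// detached file if the save already finished or dropping the partial file if
// it is still in progress. The snippet below is a hedged, single-threaded C
// sketch of that state machine only; save_entry, save_finished and
// cancel_save are invented names, and the real task posting between threads
// is deliberately ignored.
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-save state, standing in for a SaveFile map entry. */
struct save_entry {
    bool present;      /* still in the lookup map?          */
    bool in_progress;  /* still receiving data?             */
    bool file_on_disk; /* a (possibly detached) file exists */
};

/* Called when the download completes: detach but keep the entry. */
static void save_finished(struct save_entry *e)
{
    if (!e->present)
        return;              /* already cancelled and erased */
    e->in_progress = false;  /* Finish() + Detach(): keep the file */
}

/* Called when a cancel request arrives: the cancel always wins. */
static void cancel_save(struct save_entry *e)
{
    if (!e->present)
        return;
    if (!e->in_progress) {
        /* The download finished first, but the cancel still wins:
         * delete the now-detached file. */
        e->file_on_disk = false;
    } else {
        /* Still in progress: erasing the entry drops the partial file. */
        e->file_on_disk = false;
        e->in_progress = false;
    }
    e->present = false;
}

int main(void)
{
    struct save_entry e = { true, true, true };
    save_finished(&e);   /* completes before the cancel arrives */
    cancel_save(&e);     /* cancel still wins                   */
    printf("file left on disk: %s\n", e.file_on_disk ? "yes" : "no");
    return 0;
}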
| null | null | null | null | 21,021 |
51,899 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 51,899 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_GPU_GPU_VIDEO_ENCODE_ACCELERATOR_FACTORY_H_
#define MEDIA_GPU_GPU_VIDEO_ENCODE_ACCELERATOR_FACTORY_H_
#include <memory>
#include "gpu/command_buffer/service/gpu_preferences.h"
#include "media/gpu/media_gpu_export.h"
#include "media/video/video_encode_accelerator.h"
namespace media {
class MEDIA_GPU_EXPORT GpuVideoEncodeAcceleratorFactory {
public:
// Creates and Initializes a VideoEncodeAccelerator. Returns nullptr
// if there is no implementation available on the platform or calling
// VideoEncodeAccelerator::Initialize() returns false.
static std::unique_ptr<VideoEncodeAccelerator> CreateVEA(
VideoPixelFormat input_format,
const gfx::Size& input_visible_size,
VideoCodecProfile output_profile,
uint32_t initial_bitrate,
VideoEncodeAccelerator::Client* client,
const gpu::GpuPreferences& gpu_perferences);
// Gets the supported codec profiles for video encoding on the platform.
static VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles(
const gpu::GpuPreferences& gpu_preferences);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(GpuVideoEncodeAcceleratorFactory);
};
} // namespace media
#endif // MEDIA_GPU_GPU_VIDEO_ENCODE_ACCELERATOR_FACTORY_H_
| null | null | null | null | 48,762 |
3,792 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 168,787 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/*
* linux/fs/hpfs/anode.c
*
* Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
*
* handling HPFS anode tree that contains file allocation info
*/
#include "hpfs_fn.h"
/* Find a sector in allocation tree */
secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
struct bplus_header *btree, unsigned sec,
struct buffer_head *bh)
{
anode_secno a = -1;
struct anode *anode;
int i;
int c1, c2 = 0;
go_down:
if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
if (bp_internal(btree)) {
for (i = 0; i < btree->n_used_nodes; i++)
if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
a = le32_to_cpu(btree->u.internal[i].down);
brelse(bh);
if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
btree = &anode->btree;
goto go_down;
}
hpfs_error(s, "sector %08x not found in internal anode %08x", sec, a);
brelse(bh);
return -1;
}
for (i = 0; i < btree->n_used_nodes; i++)
if (le32_to_cpu(btree->u.external[i].file_secno) <= sec &&
le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) {
a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno);
if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) {
brelse(bh);
return -1;
}
if (inode) {
struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno);
hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno);
hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length);
}
brelse(bh);
return a;
}
hpfs_error(s, "sector %08x not found in external anode %08x", sec, a);
brelse(bh);
return -1;
}
/* Add a sector to tree */
secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsigned fsecno)
{
struct bplus_header *btree;
struct anode *anode = NULL, *ranode = NULL;
struct fnode *fnode;
anode_secno a, na = -1, ra, up = -1;
secno se;
struct buffer_head *bh, *bh1, *bh2;
int n;
unsigned fs;
int c1, c2 = 0;
if (fnod) {
if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1;
btree = &fnode->btree;
} else {
if (!(anode = hpfs_map_anode(s, node, &bh))) return -1;
btree = &anode->btree;
}
a = node;
go_down:
if ((n = btree->n_used_nodes - 1) < -!!fnod) {
hpfs_error(s, "anode %08x has no entries", a);
brelse(bh);
return -1;
}
if (bp_internal(btree)) {
a = le32_to_cpu(btree->u.internal[n].down);
btree->u.internal[n].file_secno = cpu_to_le32(-1);
mark_buffer_dirty(bh);
brelse(bh);
if (hpfs_sb(s)->sb_chk)
if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_add_sector_to_btree #1")) return -1;
if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
btree = &anode->btree;
goto go_down;
}
if (n >= 0) {
if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) {
hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x",
le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno,
fnod?'f':'a', node);
brelse(bh);
return -1;
}
if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) {
le32_add_cpu(&btree->u.external[n].length, 1);
mark_buffer_dirty(bh);
brelse(bh);
return se;
}
} else {
if (fsecno) {
hpfs_error(s, "empty file %08x, trying to add sector %08x", node, fsecno);
brelse(bh);
return -1;
}
se = !fnod ? node : (node + 16384) & ~16383;
}
if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) {
brelse(bh);
return -1;
}
fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length);
if (!btree->n_free_nodes) {
up = a != node ? le32_to_cpu(anode->up) : -1;
if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) {
brelse(bh);
hpfs_free_sectors(s, se, 1);
return -1;
}
if (a == node && fnod) {
anode->up = cpu_to_le32(node);
anode->btree.flags |= BP_fnode_parent;
anode->btree.n_used_nodes = btree->n_used_nodes;
anode->btree.first_free = btree->first_free;
anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes;
memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12);
btree->flags |= BP_internal;
btree->n_free_nodes = 11;
btree->n_used_nodes = 1;
btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
btree->u.internal[0].file_secno = cpu_to_le32(-1);
btree->u.internal[0].down = cpu_to_le32(na);
mark_buffer_dirty(bh);
} else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) {
brelse(bh);
brelse(bh1);
hpfs_free_sectors(s, se, 1);
hpfs_free_sectors(s, na, 1);
return -1;
}
brelse(bh);
bh = bh1;
btree = &anode->btree;
}
btree->n_free_nodes--; n = btree->n_used_nodes++;
le16_add_cpu(&btree->first_free, 12);
btree->u.external[n].disk_secno = cpu_to_le32(se);
btree->u.external[n].file_secno = cpu_to_le32(fs);
btree->u.external[n].length = cpu_to_le32(1);
mark_buffer_dirty(bh);
brelse(bh);
if ((a == node && fnod) || na == -1) return se;
c2 = 0;
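	/* The leaf had to be split; walk back up through the parents, inserting
	   a down pointer to the new anode and splitting further internal nodes
	   whenever a parent is already full. */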
while (up != (anode_secno)-1) {
struct anode *new_anode;
if (hpfs_sb(s)->sb_chk)
if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1;
if (up != node || !fnod) {
if (!(anode = hpfs_map_anode(s, up, &bh))) return -1;
btree = &anode->btree;
} else {
if (!(fnode = hpfs_map_fnode(s, up, &bh))) return -1;
btree = &fnode->btree;
}
if (btree->n_free_nodes) {
btree->n_free_nodes--; n = btree->n_used_nodes++;
le16_add_cpu(&btree->first_free, 8);
btree->u.internal[n].file_secno = cpu_to_le32(-1);
btree->u.internal[n].down = cpu_to_le32(na);
btree->u.internal[n-1].file_secno = cpu_to_le32(fs);
mark_buffer_dirty(bh);
brelse(bh);
brelse(bh2);
hpfs_free_sectors(s, ra, 1);
if ((anode = hpfs_map_anode(s, na, &bh))) {
anode->up = cpu_to_le32(up);
if (up == node && fnod)
anode->btree.flags |= BP_fnode_parent;
else
anode->btree.flags &= ~BP_fnode_parent;
mark_buffer_dirty(bh);
brelse(bh);
}
return se;
}
up = up != node ? le32_to_cpu(anode->up) : -1;
btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1);
mark_buffer_dirty(bh);
brelse(bh);
a = na;
if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
anode = new_anode;
/*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
anode->btree.flags |= BP_internal;
anode->btree.n_used_nodes = 1;
anode->btree.n_free_nodes = 59;
anode->btree.first_free = cpu_to_le16(16);
anode->btree.u.internal[0].down = cpu_to_le32(a);
anode->btree.u.internal[0].file_secno = cpu_to_le32(-1);
mark_buffer_dirty(bh);
brelse(bh);
if ((anode = hpfs_map_anode(s, a, &bh))) {
anode->up = cpu_to_le32(na);
mark_buffer_dirty(bh);
brelse(bh);
}
} else na = a;
}
if ((anode = hpfs_map_anode(s, na, &bh))) {
anode->up = cpu_to_le32(node);
if (fnod)
anode->btree.flags |= BP_fnode_parent;
mark_buffer_dirty(bh);
brelse(bh);
}
if (!fnod) {
if (!(anode = hpfs_map_anode(s, node, &bh))) {
brelse(bh2);
return -1;
}
btree = &anode->btree;
} else {
if (!(fnode = hpfs_map_fnode(s, node, &bh))) {
brelse(bh2);
return -1;
}
btree = &fnode->btree;
}
ranode->up = cpu_to_le32(node);
memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
if (fnod)
ranode->btree.flags |= BP_fnode_parent;
ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes;
if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
struct anode *unode;
if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
unode->up = cpu_to_le32(ra);
unode->btree.flags &= ~BP_fnode_parent;
mark_buffer_dirty(bh1);
brelse(bh1);
}
}
btree->flags |= BP_internal;
btree->n_free_nodes = fnod ? 10 : 58;
btree->n_used_nodes = 2;
btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
btree->u.internal[0].file_secno = cpu_to_le32(fs);
btree->u.internal[0].down = cpu_to_le32(ra);
btree->u.internal[1].file_secno = cpu_to_le32(-1);
btree->u.internal[1].down = cpu_to_le32(na);
mark_buffer_dirty(bh);
brelse(bh);
mark_buffer_dirty(bh2);
brelse(bh2);
return se;
}
/*
* Remove allocation tree. Recursion would look much nicer but
* I want to avoid it because it can cause stack overflow.
*/
void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
{
struct bplus_header *btree1 = btree;
struct anode *anode = NULL;
anode_secno ano = 0, oano;
struct buffer_head *bh;
int level = 0;
int pos = 0;
int i;
int c1, c2 = 0;
int d1, d2;
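	/* Iteratively descend to the leftmost leaf, free its extents, then climb
	   back up through the anodes' up pointers, freeing each anode once all of
	   its children have been handled. */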
go_down:
d2 = 0;
while (bp_internal(btree1)) {
ano = le32_to_cpu(btree1->u.internal[pos].down);
if (level) brelse(bh);
if (hpfs_sb(s)->sb_chk)
if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
return;
if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
btree1 = &anode->btree;
level++;
pos = 0;
}
for (i = 0; i < btree1->n_used_nodes; i++)
hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length));
go_up:
if (!level) return;
brelse(bh);
if (hpfs_sb(s)->sb_chk)
if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return;
hpfs_free_sectors(s, ano, 1);
oano = ano;
ano = le32_to_cpu(anode->up);
if (--level) {
if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
btree1 = &anode->btree;
} else btree1 = btree;
for (i = 0; i < btree1->n_used_nodes; i++) {
if (le32_to_cpu(btree1->u.internal[i].down) == oano) {
if ((pos = i + 1) < btree1->n_used_nodes)
goto go_down;
else
goto go_up;
}
}
hpfs_error(s,
"reference to anode %08x not found in anode %08x "
"(probably bad up pointer)",
oano, level ? ano : -1);
if (level)
brelse(bh);
}
/* Just a wrapper around hpfs_bplus_lookup .. used for reading eas */
static secno anode_lookup(struct super_block *s, anode_secno a, unsigned sec)
{
struct anode *anode;
struct buffer_head *bh;
if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
return hpfs_bplus_lookup(s, NULL, &anode->btree, sec, bh);
}
int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
unsigned len, char *buf)
{
struct buffer_head *bh;
char *data;
secno sec;
unsigned l;
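	/* Copy the EA data out one 512-byte sector at a time.  When 'ano' is set,
	   'a' is the root of an anode tree and every logical sector must be looked
	   up; otherwise the EA occupies consecutive sectors starting at 'a'. */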
while (len) {
if (ano) {
if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
return -1;
} else sec = a + (pos >> 9);
if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #1")) return -1;
if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
return -1;
l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
memcpy(buf, data + (pos & 0x1ff), l);
brelse(bh);
buf += l; pos += l; len -= l;
}
return 0;
}
int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos,
unsigned len, const char *buf)
{
struct buffer_head *bh;
char *data;
secno sec;
unsigned l;
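	/* Same sector-by-sector walk as hpfs_ea_read(), but copying from 'buf'
	   into the mapped sector and marking the buffer dirty. */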
while (len) {
if (ano) {
if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
return -1;
} else sec = a + (pos >> 9);
if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #2")) return -1;
if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
return -1;
l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
memcpy(data + (pos & 0x1ff), buf, l);
mark_buffer_dirty(bh);
brelse(bh);
buf += l; pos += l; len -= l;
}
return 0;
}
void hpfs_ea_remove(struct super_block *s, secno a, int ano, unsigned len)
{
struct anode *anode;
struct buffer_head *bh;
if (ano) {
if (!(anode = hpfs_map_anode(s, a, &bh))) return;
hpfs_remove_btree(s, &anode->btree);
brelse(bh);
hpfs_free_sectors(s, a, 1);
} else hpfs_free_sectors(s, a, (len + 511) >> 9);
}
/* Truncate allocation tree. Doesn't join anodes - I hope it doesn't matter */
void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
{
struct fnode *fnode;
struct anode *anode;
struct buffer_head *bh;
struct bplus_header *btree;
anode_secno node = f;
int i, j, nodes;
int c1, c2 = 0;
if (fno) {
if (!(fnode = hpfs_map_fnode(s, f, &bh))) return;
btree = &fnode->btree;
} else {
if (!(anode = hpfs_map_anode(s, f, &bh))) return;
btree = &anode->btree;
}
if (!secs) {
hpfs_remove_btree(s, btree);
if (fno) {
btree->n_free_nodes = 8;
btree->n_used_nodes = 0;
btree->first_free = cpu_to_le16(8);
btree->flags &= ~BP_internal;
mark_buffer_dirty(bh);
} else hpfs_free_sectors(s, f, 1);
brelse(bh);
return;
}
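	/* Walk down the internal nodes: free every subtree that lies entirely
	   beyond 'secs' and descend into the subtree that contains the new end
	   of the file. */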
while (bp_internal(btree)) {
nodes = btree->n_used_nodes + btree->n_free_nodes;
for (i = 0; i < btree->n_used_nodes; i++)
if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
brelse(bh);
hpfs_error(s, "internal btree %08x doesn't end with -1", node);
return;
f:
for (j = i + 1; j < btree->n_used_nodes; j++)
hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0);
btree->n_used_nodes = i + 1;
btree->n_free_nodes = nodes - btree->n_used_nodes;
btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes);
mark_buffer_dirty(bh);
if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) {
brelse(bh);
return;
}
node = le32_to_cpu(btree->u.internal[i].down);
brelse(bh);
if (hpfs_sb(s)->sb_chk)
if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
return;
if (!(anode = hpfs_map_anode(s, node, &bh))) return;
btree = &anode->btree;
}
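	/* Now at a leaf: trim the extent that straddles the new end of file and
	   free the sectors and extents that follow it. */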
nodes = btree->n_used_nodes + btree->n_free_nodes;
for (i = 0; i < btree->n_used_nodes; i++)
if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff;
brelse(bh);
return;
ff:
if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) {
hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs);
if (i) i--;
}
else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) {
hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs -
le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length)
- secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */
btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno));
}
for (j = i + 1; j < btree->n_used_nodes; j++)
hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length));
btree->n_used_nodes = i + 1;
btree->n_free_nodes = nodes - btree->n_used_nodes;
btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes);
mark_buffer_dirty(bh);
brelse(bh);
}
/* Remove a file or directory and its EAs - note that the directory must
   be empty when this is called. */
void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
{
struct buffer_head *bh;
struct fnode *fnode;
struct extended_attribute *ea;
struct extended_attribute *ea_end;
if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree);
else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
ea_end = fnode_end_ea(fnode);
for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
if (ea_indirect(ea))
hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l));
brelse(bh);
hpfs_free_sectors(s, fno, 1);
}
| null | null | null | null | 77,134 |
42,391 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 42,391 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_BASE_PATHS_WIN_H_
#define BASE_BASE_PATHS_WIN_H_
// This file declares windows-specific path keys for the base module.
// These can be used with the PathService to access various special
// directories and files.
namespace base {
enum {
PATH_WIN_START = 100,
DIR_WINDOWS, // Windows directory, usually "c:\windows"
  DIR_SYSTEM,  // Usually "c:\windows\system32"
  //                            32-bit      32-bit on 64-bit   64-bit on 64-bit
  // DIR_PROGRAM_FILES          1           2                  1
  // DIR_PROGRAM_FILESX86       1           2                  2
  // DIR_PROGRAM_FILES6432      1           1                  1
  // 1 - C:\Program Files   2 - C:\Program Files (x86)
DIR_PROGRAM_FILES, // See table above.
DIR_PROGRAM_FILESX86, // See table above.
DIR_PROGRAM_FILES6432, // See table above.
DIR_IE_INTERNET_CACHE, // Temporary Internet Files directory.
DIR_COMMON_START_MENU, // Usually "C:\ProgramData\Microsoft\Windows\
// Start Menu\Programs"
DIR_START_MENU, // Usually "C:\Users\<user>\AppData\Roaming\
// Microsoft\Windows\Start Menu\Programs"
DIR_APP_DATA, // Application Data directory under the user
// profile.
DIR_LOCAL_APP_DATA, // "Local Settings\Application Data" directory
// under the user profile.
DIR_COMMON_APP_DATA, // Usually "C:\ProgramData".
DIR_APP_SHORTCUTS, // Where tiles on the start screen are stored,
// only for Windows 8. Maps to "Local\AppData\
// Microsoft\Windows\Application Shortcuts\".
  DIR_COMMON_DESKTOP,          // Directory for the common desktop (visible
                               // on all users' Desktop).
DIR_USER_QUICK_LAUNCH, // Directory for the quick launch shortcuts.
DIR_TASKBAR_PINS, // Directory for the shortcuts pinned to taskbar.
DIR_IMPLICIT_APP_SHORTCUTS, // The implicit user pinned shortcut directory.
DIR_WINDOWS_FONTS, // Usually C:\Windows\Fonts.
PATH_WIN_END
};
} // namespace base
#endif // BASE_BASE_PATHS_WIN_H_
| null | null | null | null | 39,254 |
5,467 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 170,462 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/*
* tegra30_i2s.c - Tegra30 I2S driver
*
* Author: Stephen Warren <swarren@nvidia.com>
* Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved.
*
* Based on code copyright/by:
*
* Copyright (c) 2009-2010, NVIDIA Corporation.
* Scott Peterson <speterson@nvidia.com>
*
* Copyright (C) 2010 Google, Inc.
* Iliyan Malchev <malchev@google.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
#include "tegra30_ahub.h"
#include "tegra30_i2s.h"
#define DRV_NAME "tegra30-i2s"
static int tegra30_i2s_runtime_suspend(struct device *dev)
{
struct tegra30_i2s *i2s = dev_get_drvdata(dev);
regcache_cache_only(i2s->regmap, true);
clk_disable_unprepare(i2s->clk_i2s);
return 0;
}
static int tegra30_i2s_runtime_resume(struct device *dev)
{
struct tegra30_i2s *i2s = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(i2s->clk_i2s);
if (ret) {
dev_err(dev, "clk_enable failed: %d\n", ret);
return ret;
}
regcache_cache_only(i2s->regmap, false);
return 0;
}
static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
unsigned int fmt)
{
struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
unsigned int mask = 0, val = 0;
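	/* Translate the ASoC DAI format into TEGRA30_I2S_CTRL master, frame
	   format and LRCK polarity settings; only normal bit/frame clock
	   polarity is supported. */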
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
break;
default:
return -EINVAL;
}
mask |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
val |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
break;
case SND_SOC_DAIFMT_CBM_CFM:
break;
default:
return -EINVAL;
}
mask |= TEGRA30_I2S_CTRL_FRAME_FORMAT_MASK |
TEGRA30_I2S_CTRL_LRCK_MASK;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_A:
val |= TEGRA30_I2S_CTRL_FRAME_FORMAT_FSYNC;
val |= TEGRA30_I2S_CTRL_LRCK_L_LOW;
break;
case SND_SOC_DAIFMT_DSP_B:
val |= TEGRA30_I2S_CTRL_FRAME_FORMAT_FSYNC;
val |= TEGRA30_I2S_CTRL_LRCK_R_LOW;
break;
case SND_SOC_DAIFMT_I2S:
val |= TEGRA30_I2S_CTRL_FRAME_FORMAT_LRCK;
val |= TEGRA30_I2S_CTRL_LRCK_L_LOW;
break;
case SND_SOC_DAIFMT_RIGHT_J:
val |= TEGRA30_I2S_CTRL_FRAME_FORMAT_LRCK;
val |= TEGRA30_I2S_CTRL_LRCK_L_LOW;
break;
case SND_SOC_DAIFMT_LEFT_J:
val |= TEGRA30_I2S_CTRL_FRAME_FORMAT_LRCK;
val |= TEGRA30_I2S_CTRL_LRCK_L_LOW;
break;
default:
return -EINVAL;
}
pm_runtime_get_sync(dai->dev);
regmap_update_bits(i2s->regmap, TEGRA30_I2S_CTRL, mask, val);
pm_runtime_put(dai->dev);
return 0;
}
static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct device *dev = dai->dev;
struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
unsigned int mask, val, reg;
int ret, sample_size, srate, i2sclock, bitcnt;
struct tegra30_ahub_cif_conf cif_conf;
if (params_channels(params) != 2)
return -EINVAL;
mask = TEGRA30_I2S_CTRL_BIT_SIZE_MASK;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
val = TEGRA30_I2S_CTRL_BIT_SIZE_16;
sample_size = 16;
break;
default:
return -EINVAL;
}
regmap_update_bits(i2s->regmap, TEGRA30_I2S_CTRL, mask, val);
srate = params_rate(params);
/* Final "* 2" required by Tegra hardware */
i2sclock = srate * params_channels(params) * sample_size * 2;
bitcnt = (i2sclock / (2 * srate)) - 1;
if (bitcnt < 0 || bitcnt > TEGRA30_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US)
return -EINVAL;
ret = clk_set_rate(i2s->clk_i2s, i2sclock);
if (ret) {
dev_err(dev, "Can't set I2S clock rate: %d\n", ret);
return ret;
}
val = bitcnt << TEGRA30_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT;
if (i2sclock % (2 * srate))
val |= TEGRA30_I2S_TIMING_NON_SYM_ENABLE;
regmap_write(i2s->regmap, TEGRA30_I2S_TIMING, val);
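	/* Program the AHUB audio client interface for 2-channel, 16-bit
	   transfers; from the I2S block's point of view playback data arrives
	   over the AHUB RX CIF and capture data leaves over the TX CIF. */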
cif_conf.threshold = 0;
cif_conf.audio_channels = 2;
cif_conf.client_channels = 2;
cif_conf.audio_bits = TEGRA30_AUDIOCIF_BITS_16;
cif_conf.client_bits = TEGRA30_AUDIOCIF_BITS_16;
cif_conf.expand = 0;
cif_conf.stereo_conv = 0;
cif_conf.replicate = 0;
cif_conf.truncate = 0;
cif_conf.mono_conv = 0;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
cif_conf.direction = TEGRA30_AUDIOCIF_DIRECTION_RX;
reg = TEGRA30_I2S_CIF_RX_CTRL;
} else {
cif_conf.direction = TEGRA30_AUDIOCIF_DIRECTION_TX;
reg = TEGRA30_I2S_CIF_TX_CTRL;
}
i2s->soc_data->set_audio_cif(i2s->regmap, reg, &cif_conf);
val = (1 << TEGRA30_I2S_OFFSET_RX_DATA_OFFSET_SHIFT) |
(1 << TEGRA30_I2S_OFFSET_TX_DATA_OFFSET_SHIFT);
regmap_write(i2s->regmap, TEGRA30_I2S_OFFSET, val);
return 0;
}
static void tegra30_i2s_start_playback(struct tegra30_i2s *i2s)
{
tegra30_ahub_enable_tx_fifo(i2s->playback_fifo_cif);
regmap_update_bits(i2s->regmap, TEGRA30_I2S_CTRL,
TEGRA30_I2S_CTRL_XFER_EN_TX,
TEGRA30_I2S_CTRL_XFER_EN_TX);
}
static void tegra30_i2s_stop_playback(struct tegra30_i2s *i2s)
{
tegra30_ahub_disable_tx_fifo(i2s->playback_fifo_cif);
regmap_update_bits(i2s->regmap, TEGRA30_I2S_CTRL,
TEGRA30_I2S_CTRL_XFER_EN_TX, 0);
}
static void tegra30_i2s_start_capture(struct tegra30_i2s *i2s)
{
tegra30_ahub_enable_rx_fifo(i2s->capture_fifo_cif);
regmap_update_bits(i2s->regmap, TEGRA30_I2S_CTRL,
TEGRA30_I2S_CTRL_XFER_EN_RX,
TEGRA30_I2S_CTRL_XFER_EN_RX);
}
static void tegra30_i2s_stop_capture(struct tegra30_i2s *i2s)
{
tegra30_ahub_disable_rx_fifo(i2s->capture_fifo_cif);
regmap_update_bits(i2s->regmap, TEGRA30_I2S_CTRL,
TEGRA30_I2S_CTRL_XFER_EN_RX, 0);
}
static int tegra30_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
tegra30_i2s_start_playback(i2s);
else
tegra30_i2s_start_capture(i2s);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
tegra30_i2s_stop_playback(i2s);
else
tegra30_i2s_stop_capture(i2s);
break;
default:
return -EINVAL;
}
return 0;
}
static int tegra30_i2s_probe(struct snd_soc_dai *dai)
{
struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
dai->capture_dma_data = &i2s->capture_dma_data;
dai->playback_dma_data = &i2s->playback_dma_data;
return 0;
}
static struct snd_soc_dai_ops tegra30_i2s_dai_ops = {
.set_fmt = tegra30_i2s_set_fmt,
.hw_params = tegra30_i2s_hw_params,
.trigger = tegra30_i2s_trigger,
};
static const struct snd_soc_dai_driver tegra30_i2s_dai_template = {
.probe = tegra30_i2s_probe,
.playback = {
.stream_name = "Playback",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.capture = {
.stream_name = "Capture",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.ops = &tegra30_i2s_dai_ops,
.symmetric_rates = 1,
};
static const struct snd_soc_component_driver tegra30_i2s_component = {
.name = DRV_NAME,
};
static bool tegra30_i2s_wr_rd_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case TEGRA30_I2S_CTRL:
case TEGRA30_I2S_TIMING:
case TEGRA30_I2S_OFFSET:
case TEGRA30_I2S_CH_CTRL:
case TEGRA30_I2S_SLOT_CTRL:
case TEGRA30_I2S_CIF_RX_CTRL:
case TEGRA30_I2S_CIF_TX_CTRL:
case TEGRA30_I2S_FLOWCTL:
case TEGRA30_I2S_TX_STEP:
case TEGRA30_I2S_FLOW_STATUS:
case TEGRA30_I2S_FLOW_TOTAL:
case TEGRA30_I2S_FLOW_OVER:
case TEGRA30_I2S_FLOW_UNDER:
case TEGRA30_I2S_LCOEF_1_4_0:
case TEGRA30_I2S_LCOEF_1_4_1:
case TEGRA30_I2S_LCOEF_1_4_2:
case TEGRA30_I2S_LCOEF_1_4_3:
case TEGRA30_I2S_LCOEF_1_4_4:
case TEGRA30_I2S_LCOEF_1_4_5:
case TEGRA30_I2S_LCOEF_2_4_0:
case TEGRA30_I2S_LCOEF_2_4_1:
case TEGRA30_I2S_LCOEF_2_4_2:
return true;
default:
return false;
}
}
static bool tegra30_i2s_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case TEGRA30_I2S_FLOW_STATUS:
case TEGRA30_I2S_FLOW_TOTAL:
case TEGRA30_I2S_FLOW_OVER:
case TEGRA30_I2S_FLOW_UNDER:
return true;
default:
return false;
}
}
static const struct regmap_config tegra30_i2s_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = TEGRA30_I2S_LCOEF_2_4_2,
.writeable_reg = tegra30_i2s_wr_rd_reg,
.readable_reg = tegra30_i2s_wr_rd_reg,
.volatile_reg = tegra30_i2s_volatile_reg,
.cache_type = REGCACHE_FLAT,
};
static const struct tegra30_i2s_soc_data tegra30_i2s_config = {
.set_audio_cif = tegra30_ahub_set_cif,
};
static const struct tegra30_i2s_soc_data tegra124_i2s_config = {
.set_audio_cif = tegra124_ahub_set_cif,
};
static const struct of_device_id tegra30_i2s_of_match[] = {
{ .compatible = "nvidia,tegra124-i2s", .data = &tegra124_i2s_config },
{ .compatible = "nvidia,tegra30-i2s", .data = &tegra30_i2s_config },
{},
};
static int tegra30_i2s_platform_probe(struct platform_device *pdev)
{
struct tegra30_i2s *i2s;
const struct of_device_id *match;
u32 cif_ids[2];
struct resource *mem;
void __iomem *regs;
int ret;
i2s = devm_kzalloc(&pdev->dev, sizeof(struct tegra30_i2s), GFP_KERNEL);
if (!i2s) {
dev_err(&pdev->dev, "Can't allocate tegra30_i2s\n");
ret = -ENOMEM;
goto err;
}
dev_set_drvdata(&pdev->dev, i2s);
match = of_match_device(tegra30_i2s_of_match, &pdev->dev);
if (!match) {
dev_err(&pdev->dev, "Error: No device match found\n");
ret = -ENODEV;
goto err;
}
i2s->soc_data = (struct tegra30_i2s_soc_data *)match->data;
i2s->dai = tegra30_i2s_dai_template;
i2s->dai.name = dev_name(&pdev->dev);
ret = of_property_read_u32_array(pdev->dev.of_node,
"nvidia,ahub-cif-ids", cif_ids,
ARRAY_SIZE(cif_ids));
if (ret < 0)
goto err;
i2s->playback_i2s_cif = cif_ids[0];
i2s->capture_i2s_cif = cif_ids[1];
i2s->clk_i2s = clk_get(&pdev->dev, NULL);
if (IS_ERR(i2s->clk_i2s)) {
dev_err(&pdev->dev, "Can't retrieve i2s clock\n");
ret = PTR_ERR(i2s->clk_i2s);
goto err;
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(regs)) {
ret = PTR_ERR(regs);
goto err_clk_put;
}
i2s->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
&tegra30_i2s_regmap_config);
if (IS_ERR(i2s->regmap)) {
dev_err(&pdev->dev, "regmap init failed\n");
ret = PTR_ERR(i2s->regmap);
goto err_clk_put;
}
regcache_cache_only(i2s->regmap, true);
pm_runtime_enable(&pdev->dev);
if (!pm_runtime_enabled(&pdev->dev)) {
ret = tegra30_i2s_runtime_resume(&pdev->dev);
if (ret)
goto err_pm_disable;
}
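	/* Wire this I2S instance into the AHUB crossbar: allocate an APBIF FIFO
	   for each direction and route it to the I2S CIFs named in the device
	   tree. */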
i2s->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
i2s->playback_dma_data.maxburst = 4;
ret = tegra30_ahub_allocate_tx_fifo(&i2s->playback_fifo_cif,
i2s->playback_dma_chan,
sizeof(i2s->playback_dma_chan),
&i2s->playback_dma_data.addr);
if (ret) {
dev_err(&pdev->dev, "Could not alloc TX FIFO: %d\n", ret);
goto err_suspend;
}
ret = tegra30_ahub_set_rx_cif_source(i2s->playback_i2s_cif,
i2s->playback_fifo_cif);
if (ret) {
dev_err(&pdev->dev, "Could not route TX FIFO: %d\n", ret);
goto err_free_tx_fifo;
}
i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
i2s->capture_dma_data.maxburst = 4;
ret = tegra30_ahub_allocate_rx_fifo(&i2s->capture_fifo_cif,
i2s->capture_dma_chan,
sizeof(i2s->capture_dma_chan),
&i2s->capture_dma_data.addr);
if (ret) {
dev_err(&pdev->dev, "Could not alloc RX FIFO: %d\n", ret);
goto err_unroute_tx_fifo;
}
ret = tegra30_ahub_set_rx_cif_source(i2s->capture_fifo_cif,
i2s->capture_i2s_cif);
if (ret) {
dev_err(&pdev->dev, "Could not route TX FIFO: %d\n", ret);
goto err_free_rx_fifo;
}
ret = snd_soc_register_component(&pdev->dev, &tegra30_i2s_component,
&i2s->dai, 1);
if (ret) {
dev_err(&pdev->dev, "Could not register DAI: %d\n", ret);
ret = -ENOMEM;
goto err_unroute_rx_fifo;
}
ret = tegra_pcm_platform_register_with_chan_names(&pdev->dev,
&i2s->dma_config, i2s->playback_dma_chan,
i2s->capture_dma_chan);
if (ret) {
dev_err(&pdev->dev, "Could not register PCM: %d\n", ret);
goto err_unregister_component;
}
return 0;
err_unregister_component:
snd_soc_unregister_component(&pdev->dev);
err_unroute_rx_fifo:
tegra30_ahub_unset_rx_cif_source(i2s->capture_fifo_cif);
err_free_rx_fifo:
tegra30_ahub_free_rx_fifo(i2s->capture_fifo_cif);
err_unroute_tx_fifo:
tegra30_ahub_unset_rx_cif_source(i2s->playback_i2s_cif);
err_free_tx_fifo:
tegra30_ahub_free_tx_fifo(i2s->playback_fifo_cif);
err_suspend:
if (!pm_runtime_status_suspended(&pdev->dev))
tegra30_i2s_runtime_suspend(&pdev->dev);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
err_clk_put:
clk_put(i2s->clk_i2s);
err:
return ret;
}
static int tegra30_i2s_platform_remove(struct platform_device *pdev)
{
struct tegra30_i2s *i2s = dev_get_drvdata(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
tegra30_i2s_runtime_suspend(&pdev->dev);
tegra_pcm_platform_unregister(&pdev->dev);
snd_soc_unregister_component(&pdev->dev);
tegra30_ahub_unset_rx_cif_source(i2s->capture_fifo_cif);
tegra30_ahub_free_rx_fifo(i2s->capture_fifo_cif);
tegra30_ahub_unset_rx_cif_source(i2s->playback_i2s_cif);
tegra30_ahub_free_tx_fifo(i2s->playback_fifo_cif);
clk_put(i2s->clk_i2s);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int tegra30_i2s_suspend(struct device *dev)
{
struct tegra30_i2s *i2s = dev_get_drvdata(dev);
regcache_mark_dirty(i2s->regmap);
return 0;
}
static int tegra30_i2s_resume(struct device *dev)
{
struct tegra30_i2s *i2s = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_get_sync(dev);
if (ret < 0)
return ret;
ret = regcache_sync(i2s->regmap);
pm_runtime_put(dev);
return ret;
}
#endif
static const struct dev_pm_ops tegra30_i2s_pm_ops = {
SET_RUNTIME_PM_OPS(tegra30_i2s_runtime_suspend,
tegra30_i2s_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(tegra30_i2s_suspend, tegra30_i2s_resume)
};
static struct platform_driver tegra30_i2s_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = tegra30_i2s_of_match,
.pm = &tegra30_i2s_pm_ops,
},
.probe = tegra30_i2s_platform_probe,
.remove = tegra30_i2s_platform_remove,
};
module_platform_driver(tegra30_i2s_driver);
MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
MODULE_DESCRIPTION("Tegra30 I2S ASoC driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
MODULE_DEVICE_TABLE(of, tegra30_i2s_of_match);
| null | null | null | null | 78,809 |
45,870 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 45,870 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef ASH_HOST_ASH_WINDOW_TREE_HOST_MIRRORING_UNIFIED_H_
#define ASH_HOST_ASH_WINDOW_TREE_HOST_MIRRORING_UNIFIED_H_
#include "ash/host/ash_window_tree_host_platform.h"
namespace ash {
class AshWindowTreeHostMirroringDelegate;
// A window tree host for the mirroring displays that constitute the unified
// desktop. This correctly handles coordinate conversion from DIP to pixels and
// vice versa.
class AshWindowTreeHostMirroringUnified : public AshWindowTreeHostPlatform {
public:
AshWindowTreeHostMirroringUnified(
const gfx::Rect& initial_bounds,
int64_t mirroring_display_id,
AshWindowTreeHostMirroringDelegate* delegate);
~AshWindowTreeHostMirroringUnified() override;
// aura::WindowTreeHost:
gfx::Transform GetRootTransformForLocalEventCoordinates() const override;
void ConvertDIPToPixels(gfx::Point* point) const override;
void ConvertPixelsToDIP(gfx::Point* point) const override;
// ash::AshWindowTreeHostPlatform:
void PrepareForShutdown() override;
private:
int64_t mirroring_display_id_;
AshWindowTreeHostMirroringDelegate* delegate_; // Not owned.
bool is_shutting_down_ = false;
DISALLOW_COPY_AND_ASSIGN(AshWindowTreeHostMirroringUnified);
};
} // namespace ash
#endif // ASH_HOST_ASH_WINDOW_TREE_HOST_MIRRORING_UNIFIED_H_
| null | null | null | null | 42,733 |
69,447 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 69,447 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
/*
* ptw32_tkAssocDestroy.c
*
* Description:
* This translation unit implements routines which are private to
* the implementation and may be used throughout it.
*
* --------------------------------------------------------------------------
*
* Pthreads-win32 - POSIX Threads Library for Win32
* Copyright(C) 1998 John E. Bossom
* Copyright(C) 1999,2005 Pthreads-win32 contributors
*
* Contact Email: rpj@callisto.canberra.edu.au
*
* The current list of contributors is contained
* in the file CONTRIBUTORS included with the source
* code distribution. The list can also be seen at the
* following World Wide Web location:
* http://sources.redhat.com/pthreads-win32/contributors.html
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library in the file COPYING.LIB;
* if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "pthread.h"
#include "implement.h"
void
ptw32_tkAssocDestroy (ThreadKeyAssoc * assoc)
/*
* -------------------------------------------------------------------
* This routine releases all resources for the given ThreadKeyAssoc
* once it is no longer being referenced
* ie) either the key or thread has stopped referencing it.
*
* Parameters:
* assoc
* an instance of ThreadKeyAssoc.
* Returns:
* N/A
* -------------------------------------------------------------------
*/
{
/*
* Both key->keyLock and thread->threadLock are locked before
* entry to this routine.
*/
if (assoc != NULL)
{
ThreadKeyAssoc * prev, * next;
/* Remove assoc from thread's keys chain */
prev = assoc->prevKey;
next = assoc->nextKey;
if (prev != NULL)
{
prev->nextKey = next;
}
if (next != NULL)
{
next->prevKey = prev;
}
if (assoc->thread->keys == assoc)
{
/* We're at the head of the thread's keys chain */
assoc->thread->keys = next;
}
if (assoc->thread->nextAssoc == assoc)
{
/*
* Thread is exiting and we're deleting the assoc to be processed next.
* Hand thread the assoc after this one.
*/
assoc->thread->nextAssoc = next;
}
/* Remove assoc from key's threads chain */
prev = assoc->prevThread;
next = assoc->nextThread;
if (prev != NULL)
{
prev->nextThread = next;
}
if (next != NULL)
{
next->prevThread = prev;
}
if (assoc->key->threads == assoc)
{
/* We're at the head of the key's threads chain */
assoc->key->threads = next;
}
free (assoc);
}
} /* ptw32_tkAssocDestroy */
| null | null | null | null | 66,310 |
26,118 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 26,118 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "extensions/browser/api/guest_view/extension_view/extension_view_internal_api.h"
#include <utility>
#include "base/memory/ptr_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "components/crx_file/id_util.h"
#include "content/public/browser/render_frame_host.h"
#include "content/public/browser/render_process_host.h"
#include "content/public/browser/storage_partition.h"
#include "content/public/browser/web_contents.h"
#include "content/public/common/stop_find_action.h"
#include "extensions/browser/guest_view/extension_view/whitelist/extension_view_whitelist.h"
#include "extensions/common/api/extension_view_internal.h"
#include "extensions/common/constants.h"
namespace extensionview = extensions::api::extension_view_internal;
namespace extensions {
bool ExtensionViewInternalExtensionFunction::RunAsync() {
int instance_id = 0;
EXTENSION_FUNCTION_VALIDATE(args_->GetInteger(0, &instance_id));
ExtensionViewGuest* guest = ExtensionViewGuest::From(
render_frame_host()->GetProcess()->GetID(), instance_id);
if (!guest)
return false;
return RunAsyncSafe(guest);
}
// Checks the validity of |src|, including that it follows the chrome extension
// scheme and that its extension ID is valid.
// Returns true if |src| is valid.
bool IsSrcValid(const GURL& src) {
// Check if src is valid and matches the extension scheme.
if (!src.is_valid() || !src.SchemeIs(kExtensionScheme))
return false;
// Get the extension id and check if it is valid.
std::string extension_id = src.host();
if (!crx_file::id_util::IdIsValid(extension_id) ||
!IsExtensionIdWhitelisted(extension_id))
return false;
return true;
}
bool ExtensionViewInternalLoadSrcFunction::RunAsyncSafe(
ExtensionViewGuest* guest) {
std::unique_ptr<extensionview::LoadSrc::Params> params(
extensionview::LoadSrc::Params::Create(*args_));
EXTENSION_FUNCTION_VALIDATE(params.get());
std::string src = params->src;
GURL url(src);
bool has_load_succeeded = false;
bool is_src_valid = IsSrcValid(url);
if (is_src_valid)
has_load_succeeded = guest->NavigateGuest(src, true /* force_navigation */);
// Return whether load is successful.
SetResult(std::make_unique<base::Value>(has_load_succeeded));
SendResponse(true);
return true;
}
bool ExtensionViewInternalParseSrcFunction::RunAsync() {
std::unique_ptr<extensionview::ParseSrc::Params> params(
extensionview::ParseSrc::Params::Create(*args_));
EXTENSION_FUNCTION_VALIDATE(params.get());
GURL url(params->src);
bool is_src_valid = IsSrcValid(url);
// Return whether the src is valid and the current extension ID to
// the callback.
std::unique_ptr<base::ListValue> result_list(new base::ListValue());
result_list->AppendBoolean(is_src_valid);
result_list->AppendString(url.host());
SetResultList(std::move(result_list));
SendResponse(true);
return true;
}
} // namespace extensions
| null | null | null | null | 22,981 |
30,983 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 30,983 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
/*
* Copyright (C) 2010 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef THIRD_PARTY_BLINK_RENDERER_BINDINGS_CORE_V8_DICTIONARY_H_
#define THIRD_PARTY_BLINK_RENDERER_BINDINGS_CORE_V8_DICTIONARY_H_
#include "third_party/blink/renderer/bindings/core/v8/dictionary_iterator.h"
#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
#include "third_party/blink/renderer/bindings/core/v8/v8_binding_for_core.h"
#include "third_party/blink/renderer/core/core_export.h"
#include "third_party/blink/renderer/platform/wtf/hash_map.h"
#include "third_party/blink/renderer/platform/wtf/text/string_view.h"
#include "third_party/blink/renderer/platform/wtf/vector.h"
#include "v8/include/v8.h"
namespace blink {
class ExecutionContext;
// Dictionary class provides ways to retrieve property values as C++ objects
// from a V8 object. Instances of this class must not outlive V8's handle scope
// because they hold a V8 value without putting it on persistent handles.
class CORE_EXPORT Dictionary final {
DISALLOW_NEW_EXCEPT_PLACEMENT_NEW();
public:
Dictionary() : isolate_(nullptr) {}
Dictionary(v8::Isolate*,
v8::Local<v8::Value> dictionary_object,
ExceptionState&);
Dictionary& operator=(const Dictionary&) = default;
bool IsObject() const { return !dictionary_object_.IsEmpty(); }
bool IsUndefinedOrNull() const { return !IsObject(); }
v8::Local<v8::Value> V8Value() const {
if (!isolate_)
return v8::Local<v8::Value>();
switch (value_type_) {
case ValueType::kUndefined:
return v8::Undefined(isolate_);
case ValueType::kNull:
return v8::Null(isolate_);
case ValueType::kObject:
return dictionary_object_;
default:
NOTREACHED();
return v8::Local<v8::Value>();
}
}
bool Get(const StringView& key, v8::Local<v8::Value>& value) const {
return isolate_ && GetInternal(V8String(isolate_, key), value);
}
bool Get(const StringView& key,
v8::Local<v8::Value>& value,
ExceptionState& exception_state) const {
return isolate_ &&
GetInternal(V8String(isolate_, key), value, exception_state);
}
bool Get(const StringView& key, Dictionary&) const;
// Gets the value of the given property in this dictionary and returns it.
// The type parameter |IDLType| is an IDL type (e.g., IDLByteString).
// - If accessing the property raises an error, the error is set to the
// ExceptionState and returns nothing.
// - If converting data fails, the error is set to the ExceptionState and
// returns nothing.
// - If |key| property is not present in this dictionary (including the case
// where the stored value is |undefined|), returns nothing.
// - Otherwise, returns the value.
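  //
  // A minimal usage sketch (the "name" key and the IDLByteString choice here
  // are only illustrative):
  //   WTF::Optional<String> name =
  //       dict.Get<IDLByteString>("name", exception_state);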
template <typename IDLType>
WTF::Optional<typename IDLType::ImplType> Get(
const StringView& key,
ExceptionState& exception_state) const {
v8::Local<v8::Value> v8_value;
DCHECK(!exception_state.HadException());
if (!Get(key, v8_value, exception_state))
return WTF::nullopt;
DCHECK(!exception_state.HadException());
DCHECK(!v8_value.IsEmpty());
if (v8_value->IsUndefined())
return WTF::nullopt;
auto value = NativeValueTraits<IDLType>::NativeValue(isolate_, v8_value,
exception_state);
if (exception_state.HadException())
return WTF::nullopt;
return value;
}
HashMap<String, String> GetOwnPropertiesAsStringHashMap(
ExceptionState&) const;
Vector<String> GetPropertyNames(ExceptionState&) const;
bool HasProperty(const StringView& key, ExceptionState&) const;
v8::Isolate* GetIsolate() const { return isolate_; }
v8::Local<v8::Context> V8Context() const {
DCHECK(isolate_);
return isolate_->GetCurrentContext();
}
DictionaryIterator GetIterator(ExecutionContext*) const;
private:
bool GetInternal(const v8::Local<v8::Value>& key,
v8::Local<v8::Value>& result) const;
bool GetInternal(const v8::Local<v8::Value>& key,
v8::Local<v8::Value>& result,
ExceptionState&) const;
v8::Isolate* isolate_;
// Undefined, Null, or Object is allowed as type of dictionary.
enum class ValueType {
kUndefined,
kNull,
kObject
} value_type_ = ValueType::kUndefined;
v8::Local<v8::Object> dictionary_object_; // an Object or empty
};
template <>
struct NativeValueTraits<Dictionary>
: public NativeValueTraitsBase<Dictionary> {
static Dictionary NativeValue(v8::Isolate* isolate,
v8::Local<v8::Value> value,
ExceptionState& exception_state) {
return Dictionary(isolate, value, exception_state);
}
};
// DictionaryHelper is a collection of static methods for getting or
// converting a value from Dictionary.
// DEPRECATED, Use template <typename IDLType> Dictionary::Get.
struct DictionaryHelper {
STATIC_ONLY(DictionaryHelper);
template <typename T>
static bool Get(const Dictionary&, const StringView& key, T& value);
};
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_BINDINGS_CORE_V8_DICTIONARY_H_
| null | null | null | null | 27,846 |
48,120 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 48,120 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/views/focus/focus_manager.h"
#include <algorithm>
#include <vector>
#include "base/auto_reset.h"
#include "base/i18n/rtl.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "ui/base/accelerators/accelerator.h"
#include "ui/base/ime/input_method.h"
#include "ui/base/ime/text_input_client.h"
#include "ui/events/event.h"
#include "ui/events/keycodes/keyboard_codes.h"
#include "ui/views/focus/focus_manager_delegate.h"
#include "ui/views/focus/focus_search.h"
#include "ui/views/focus/widget_focus_manager.h"
#include "ui/views/view.h"
#include "ui/views/view_tracker.h"
#include "ui/views/widget/root_view.h"
#include "ui/views/widget/widget.h"
#include "ui/views/widget/widget_delegate.h"
namespace views {
bool FocusManager::arrow_key_traversal_enabled_ = false;
FocusManager::FocusManager(Widget* widget,
std::unique_ptr<FocusManagerDelegate> delegate)
: widget_(widget),
delegate_(std::move(delegate)),
view_tracker_for_stored_view_(std::make_unique<ViewTracker>()) {
DCHECK(widget_);
}
FocusManager::~FocusManager() {
if (focused_view_)
focused_view_->RemoveObserver(this);
}
bool FocusManager::OnKeyEvent(const ui::KeyEvent& event) {
const int key_code = event.key_code();
if (event.type() != ui::ET_KEY_PRESSED && event.type() != ui::ET_KEY_RELEASED)
return false;
if (shortcut_handling_suspended())
return true;
ui::Accelerator accelerator(event);
if (event.type() == ui::ET_KEY_PRESSED) {
// If the focused view wants to process the key event as is, let it be.
if (focused_view_ && focused_view_->SkipDefaultKeyEventProcessing(event) &&
!accelerator_manager_.HasPriorityHandler(accelerator))
return true;
// Intercept Tab related messages for focus traversal.
// Note that we don't do focus traversal if the root window is not part of
// the active window hierarchy as this would mean we have no focused view
// and would focus the first focusable view.
if (IsTabTraversalKeyEvent(event)) {
AdvanceFocus(event.IsShiftDown());
return false;
}
if (arrow_key_traversal_enabled_ && ProcessArrowKeyTraversal(event))
return false;
// Intercept arrow key messages to switch between grouped views.
bool is_left = key_code == ui::VKEY_LEFT || key_code == ui::VKEY_UP;
bool is_right = key_code == ui::VKEY_RIGHT || key_code == ui::VKEY_DOWN;
if (focused_view_ && focused_view_->GetGroup() != -1 &&
(is_left || is_right)) {
bool next = is_right;
View::Views views;
focused_view_->parent()->GetViewsInGroup(focused_view_->GetGroup(),
&views);
View::Views::const_iterator i(
std::find(views.begin(), views.end(), focused_view_));
DCHECK(i != views.end());
size_t index = i - views.begin();
if (next && index == views.size() - 1)
index = 0;
else if (!next && index == 0)
index = views.size() - 1;
else
index += next ? 1 : -1;
SetFocusedViewWithReason(views[index], kReasonFocusTraversal);
return false;
}
}
// Process keyboard accelerators.
// If the key combination matches an accelerator, the accelerator is
// triggered, otherwise the key event is processed as usual.
if (ProcessAccelerator(accelerator)) {
// If a shortcut was activated for this keydown message, do not propagate
// the event further.
return false;
}
return true;
}
// Tests whether a view is valid, whether it still belongs to the window
// hierarchy of the FocusManager.
bool FocusManager::ContainsView(View* view) {
Widget* widget = view->GetWidget();
return widget && widget->GetFocusManager() == this;
}
void FocusManager::AdvanceFocus(bool reverse) {
View* v = GetNextFocusableView(focused_view_, NULL, reverse, false);
// Note: Do not skip this next block when v == focused_view_. If the user
// tabs past the last focusable element in a webpage, we'll get here, and if
// the TabContentsContainerView is the only focusable view (possible in
// fullscreen mode), we need to run this block in order to cycle around to the
// first element on the page.
if (v) {
views::View* focused_view = focused_view_;
v->AboutToRequestFocusFromTabTraversal(reverse);
// AboutToRequestFocusFromTabTraversal() may have changed focus. If it did,
// don't change focus again.
if (focused_view == focused_view_)
SetFocusedViewWithReason(v, kReasonFocusTraversal);
}
}
void FocusManager::ClearNativeFocus() {
// Keep the top root window focused so we get keyboard events.
widget_->ClearNativeFocus();
}
bool FocusManager::RotatePaneFocus(Direction direction,
FocusCycleWrappingBehavior wrap) {
// Get the list of all accessible panes.
std::vector<View*> panes;
widget_->widget_delegate()->GetAccessiblePanes(&panes);
// Count the number of panes and set the default index if no pane
// is initially focused.
int count = static_cast<int>(panes.size());
if (count == 0)
return false;
// Initialize |index| to an appropriate starting index if nothing is
// focused initially.
int index = direction == kBackward ? 0 : count - 1;
// Check to see if a pane already has focus and update the index accordingly.
const views::View* focused_view = GetFocusedView();
if (focused_view) {
for (int i = 0; i < count; i++) {
if (panes[i] && panes[i]->Contains(focused_view)) {
index = i;
break;
}
}
}
// Rotate focus.
int start_index = index;
for (;;) {
if (direction == kBackward)
index--;
else
index++;
if (wrap == kNoWrap && (index >= count || index < 0))
return false;
index = (index + count) % count;
// Ensure that we don't loop more than once.
if (index == start_index)
break;
views::View* pane = panes[index];
DCHECK(pane);
if (!pane->visible())
continue;
pane->RequestFocus();
focused_view = GetFocusedView();
if (pane == focused_view || pane->Contains(focused_view))
return true;
}
return false;
}
View* FocusManager::GetNextFocusableView(View* original_starting_view,
Widget* starting_widget,
bool reverse,
bool dont_loop) {
DCHECK(!focused_view_ || ContainsView(focused_view_))
<< " focus_view=" << focused_view_;
FocusTraversable* focus_traversable = NULL;
View* starting_view = NULL;
if (original_starting_view) {
// Search up the containment hierarchy to see if a view is acting as
// a pane, and wants to implement its own focus traversable to keep
// the focus trapped within that pane.
View* pane_search = original_starting_view;
while (pane_search) {
focus_traversable = pane_search->GetPaneFocusTraversable();
if (focus_traversable) {
starting_view = original_starting_view;
break;
}
pane_search = pane_search->parent();
}
if (!focus_traversable) {
if (!reverse) {
// If the starting view has a focus traversable, use it.
// This is the case with NativeWidgetWins for example.
focus_traversable = original_starting_view->GetFocusTraversable();
// Otherwise default to the root view.
if (!focus_traversable) {
focus_traversable =
original_starting_view->GetWidget()->GetFocusTraversable();
starting_view = original_starting_view;
}
} else {
// When you are going back, starting view's FocusTraversable
// should not be used.
focus_traversable =
original_starting_view->GetWidget()->GetFocusTraversable();
starting_view = original_starting_view;
}
}
} else {
Widget* widget = starting_widget ? starting_widget : widget_;
focus_traversable = widget->GetFocusTraversable();
}
// Traverse the FocusTraversable tree down to find the focusable view.
View* v = FindFocusableView(focus_traversable, starting_view, reverse);
if (v)
return v;
// Let's go up in the FocusTraversable tree.
FocusTraversable* parent_focus_traversable =
focus_traversable->GetFocusTraversableParent();
starting_view = focus_traversable->GetFocusTraversableParentView();
while (parent_focus_traversable) {
FocusTraversable* new_focus_traversable = nullptr;
View* new_starting_view = nullptr;
// When we are going backward, the parent view might gain the next focus.
bool check_starting_view = reverse;
v = parent_focus_traversable->GetFocusSearch()->FindNextFocusableView(
starting_view, reverse, FocusSearch::UP, check_starting_view,
&new_focus_traversable, &new_starting_view);
if (new_focus_traversable) {
DCHECK(!v);
// There is a FocusTraversable, traverse it down.
v = FindFocusableView(new_focus_traversable, nullptr, reverse);
}
if (v)
return v;
starting_view = focus_traversable->GetFocusTraversableParentView();
parent_focus_traversable =
parent_focus_traversable->GetFocusTraversableParent();
}
// If we get here, we have reached the end of the focus hierarchy, let's
// loop. Make sure there was at least a view to start with, to prevent
// infinitely looping in empty windows.
if (dont_loop || !original_starting_view)
return nullptr;
// Easy, just clear the selection and press tab again.
// By calling with NULL as the starting view, we'll start from either
// the starting views widget or |widget_|.
Widget* widget = original_starting_view->GetWidget();
if (widget->widget_delegate()->ShouldAdvanceFocusToTopLevelWidget())
widget = widget_;
return GetNextFocusableView(nullptr, widget, reverse, true);
}
void FocusManager::SetKeyboardAccessible(bool keyboard_accessible) {
if (keyboard_accessible == keyboard_accessible_)
return;
keyboard_accessible_ = keyboard_accessible;
// Disabling keyboard accessibility may cause the focused view to become not
// focusable. Hence advance focus if necessary.
AdvanceFocusIfNecessary();
}
void FocusManager::SetFocusedViewWithReason(View* view,
FocusChangeReason reason) {
if (focused_view_ == view)
return;
// TODO(oshima|achuith): This is to diagnose crbug.com/687232.
// Change this to DCHECK once it's resolved.
CHECK(!view || ContainsView(view));
#if !defined(OS_MACOSX)
// TODO(warx): There are some AccessiblePaneViewTest failed on macosx.
// crbug.com/650859. Remove !defined(OS_MACOSX) once that is fixed.
//
// If the widget isn't active store the focused view and then attempt to
// activate the widget. If activation succeeds |view| will be focused.
// If activation fails |view| will be focused the next time the widget is
// made active.
if (view && !widget_->IsActive()) {
SetStoredFocusView(view);
widget_->Activate();
return;
}
#endif
// Update the reason for the focus change (since this is checked by
// some listeners), then notify all listeners.
focus_change_reason_ = reason;
for (FocusChangeListener& observer : focus_change_listeners_)
observer.OnWillChangeFocus(focused_view_, view);
View* old_focused_view = focused_view_;
focused_view_ = view;
if (old_focused_view) {
old_focused_view->RemoveObserver(this);
old_focused_view->Blur();
}
// Also make |focused_view_| the stored focus view. This way the stored focus
// view is remembered if focus changes are requested prior to a show or while
// hidden.
SetStoredFocusView(focused_view_);
if (focused_view_) {
focused_view_->AddObserver(this);
focused_view_->Focus();
}
for (FocusChangeListener& observer : focus_change_listeners_)
observer.OnDidChangeFocus(old_focused_view, focused_view_);
}
void FocusManager::ClearFocus() {
  // SetFocusedView(NULL) is going to clear out the stored view too. We need to
  // persist it in this case.
views::View* focused_view = GetStoredFocusView();
SetFocusedView(NULL);
ClearNativeFocus();
SetStoredFocusView(focused_view);
}
void FocusManager::AdvanceFocusIfNecessary() {
// If widget is inactive, there is no focused view to check. The stored view
// will also be checked for focusability when it is being restored.
if (!widget_->IsActive())
return;
// If widget is active and focused view is not focusable, advance focus or,
// if not possible, clear focus.
if (focused_view_ && !IsFocusable(focused_view_)) {
AdvanceFocus(false);
if (focused_view_ && !IsFocusable(focused_view_))
ClearFocus();
}
}
void FocusManager::StoreFocusedView(bool clear_native_focus) {
View* focused_view = focused_view_;
// Don't do anything if no focused view. Storing the view (which is NULL), in
// this case, would clobber the view that was previously saved.
if (!focused_view_)
return;
View* v = focused_view_;
if (clear_native_focus) {
// Temporarily disable notification. ClearFocus() will set the focus to the
// main browser window. This extra focus bounce which happens during
// deactivation can confuse registered WidgetFocusListeners, as the focus
// is not changing due to a user-initiated event.
AutoNativeNotificationDisabler local_notification_disabler;
// ClearFocus() also stores the focused view.
ClearFocus();
} else {
SetFocusedView(NULL);
SetStoredFocusView(focused_view);
}
if (v)
v->SchedulePaint(); // Remove focus border.
}
bool FocusManager::RestoreFocusedView() {
View* view = GetStoredFocusView();
if (view) {
if (ContainsView(view)) {
if (!view->IsFocusable() && view->IsAccessibilityFocusable()) {
// RequestFocus would fail, but we want to restore focus to controls
// that had focus in accessibility mode.
SetFocusedViewWithReason(view, kReasonFocusRestore);
} else {
// This usually just sets the focus if this view is focusable, but
// let the view override RequestFocus if necessary.
view->RequestFocus();
// If it succeeded, the reason would be incorrect; set it to
// focus restore.
if (focused_view_ == view)
focus_change_reason_ = kReasonFocusRestore;
}
}
// The |keyboard_accessible_| mode may have changed while the widget was
// inactive.
AdvanceFocusIfNecessary();
}
return view && view == focused_view_;
}
void FocusManager::SetStoredFocusView(View* focus_view) {
view_tracker_for_stored_view_->SetView(focus_view);
}
View* FocusManager::GetStoredFocusView() {
return view_tracker_for_stored_view_->view();
}
// Find the next (previous if reverse is true) focusable view for the specified
// FocusTraversable, starting at the specified view, traversing down the
// FocusTraversable hierarchy.
View* FocusManager::FindFocusableView(FocusTraversable* focus_traversable,
View* starting_view,
bool reverse) {
FocusTraversable* new_focus_traversable = NULL;
View* new_starting_view = NULL;
View* v = focus_traversable->GetFocusSearch()->FindNextFocusableView(
starting_view, reverse, FocusSearch::DOWN, false, &new_focus_traversable,
&new_starting_view);
// Let's go down the FocusTraversable tree as much as we can.
while (new_focus_traversable) {
DCHECK(!v);
focus_traversable = new_focus_traversable;
new_focus_traversable = NULL;
starting_view = NULL;
v = focus_traversable->GetFocusSearch()->FindNextFocusableView(
starting_view, reverse, FocusSearch::DOWN, false,
&new_focus_traversable, &new_starting_view);
}
return v;
}
void FocusManager::RegisterAccelerator(
const ui::Accelerator& accelerator,
ui::AcceleratorManager::HandlerPriority priority,
ui::AcceleratorTarget* target) {
accelerator_manager_.Register({accelerator}, priority, target);
}
void FocusManager::UnregisterAccelerator(const ui::Accelerator& accelerator,
ui::AcceleratorTarget* target) {
accelerator_manager_.Unregister(accelerator, target);
}
void FocusManager::UnregisterAccelerators(ui::AcceleratorTarget* target) {
accelerator_manager_.UnregisterAll(target);
}
bool FocusManager::ProcessAccelerator(const ui::Accelerator& accelerator) {
if (accelerator_manager_.Process(accelerator))
return true;
return delegate_ && delegate_->ProcessAccelerator(accelerator);
}
bool FocusManager::HasPriorityHandler(
const ui::Accelerator& accelerator) const {
return accelerator_manager_.HasPriorityHandler(accelerator);
}
// static
bool FocusManager::IsTabTraversalKeyEvent(const ui::KeyEvent& key_event) {
return key_event.key_code() == ui::VKEY_TAB && !key_event.IsControlDown();
}
void FocusManager::ViewRemoved(View* removed) {
// If the view being removed contains (or is) the focused view,
// clear the focus. However, it's not safe to call ClearFocus()
// (and in turn ClearNativeFocus()) here because ViewRemoved() can
// be called while the top level widget is being destroyed.
DCHECK(removed);
if (removed->Contains(focused_view_))
SetFocusedView(NULL);
}
void FocusManager::AddFocusChangeListener(FocusChangeListener* listener) {
focus_change_listeners_.AddObserver(listener);
}
void FocusManager::RemoveFocusChangeListener(FocusChangeListener* listener) {
focus_change_listeners_.RemoveObserver(listener);
}
bool FocusManager::ProcessArrowKeyTraversal(const ui::KeyEvent& event) {
if (event.IsShiftDown() || event.IsControlDown() || event.IsAltDown())
return false;
const ui::KeyboardCode key = event.key_code();
if (key != ui::VKEY_UP && key != ui::VKEY_DOWN && key != ui::VKEY_LEFT &&
key != ui::VKEY_RIGHT) {
return false;
}
const ui::KeyboardCode reverse =
base::i18n::IsRTL() ? ui::VKEY_RIGHT : ui::VKEY_LEFT;
AdvanceFocus(key == reverse || key == ui::VKEY_UP);
return true;
}
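// The direction mapping above, isolated for clarity (a hedged illustration,
// not views code): VKEY_UP always traverses backwards, and the "backwards"
// horizontal arrow flips between Left (LTR) and Right (RTL).
//
//   bool IsReverseArrow(ui::KeyboardCode key, bool is_rtl) {
//     const ui::KeyboardCode reverse =
//         is_rtl ? ui::VKEY_RIGHT : ui::VKEY_LEFT;
//     return key == reverse || key == ui::VKEY_UP;
//   }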
bool FocusManager::IsFocusable(View* view) const {
DCHECK(view);
// |keyboard_accessible_| is only used on Mac.
#if defined(OS_MACOSX)
return keyboard_accessible_ ? view->IsAccessibilityFocusable()
: view->IsFocusable();
#else
return view->IsAccessibilityFocusable();
#endif
}
void FocusManager::OnViewIsDeleting(View* view) {
// Typically ViewRemoved() is called and all the cleanup happens there. With
// child widgets it's possible to change the parent out from under the Widget
// such that ViewRemoved() is never called.
CHECK_EQ(view, focused_view_);
SetFocusedView(nullptr);
}
} // namespace views
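// Example (a hedged sketch of typical accelerator wiring; EscapeHandler is a
// hypothetical class used only for illustration): an ui::AcceleratorTarget
// registers with the FocusManager and is invoked via ProcessAccelerator()
// when the matching key combination arrives.
//
//   class EscapeHandler : public ui::AcceleratorTarget {
//    public:
//     bool AcceleratorPressed(const ui::Accelerator& accelerator) override {
//       // React to the accelerator; return true when it was handled.
//       return true;
//     }
//     bool CanHandleAccelerators() const override { return true; }
//   };
//
//   // Registration and dispatch (|focus_manager| is owned elsewhere):
//   EscapeHandler handler;
//   ui::Accelerator escape(ui::VKEY_ESCAPE, ui::EF_NONE);
//   focus_manager->RegisterAccelerator(
//       escape, ui::AcceleratorManager::kNormalPriority, &handler);
//   focus_manager->ProcessAccelerator(escape);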
| null | null | null | null | 44,983 |
17,172 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 17,172 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/viz/common/surfaces/local_surface_id.h"
#include "base/logging.h"
#include "testing/gtest/include/gtest/gtest.h"
// Verifies that LocalSurfaceId::ToString() abbreviates the embedded
// UnguessableToken to its first four characters ("ABCD...") when logging is
// not verbose, and prints the full token otherwise.
TEST(LocalSurfaceIdTest, VerifyToString) {
const base::UnguessableToken token =
base::UnguessableToken::Deserialize(0x111111, 0);
const base::UnguessableToken big_token =
base::UnguessableToken::Deserialize(0x123456789, 0xABCABCABC);
const base::UnguessableToken small_token =
base::UnguessableToken::Deserialize(0, 0x1);
const viz::LocalSurfaceId local_surface_id(11, 22, token);
const viz::LocalSurfaceId big_local_surface_id(11, 22, big_token);
const viz::LocalSurfaceId small_local_surface_id(11, 22, small_token);
const std::string verbose_expected =
"LocalSurfaceId(11, 22, " + token.ToString() + ")";
const std::string brief_expected =
"LocalSurfaceId(11, 22, " + token.ToString().substr(0, 4) + "...)";
const std::string big_verbose_expected =
"LocalSurfaceId(11, 22, " + big_token.ToString() + ")";
const std::string big_brief_expected =
"LocalSurfaceId(11, 22, " + big_token.ToString().substr(0, 4) + "...)";
const std::string small_verbose_expected =
"LocalSurfaceId(11, 22, " + small_token.ToString() + ")";
const std::string small_brief_expected =
"LocalSurfaceId(11, 22, " + small_token.ToString().substr(0, 4) + "...)";
int previous_log_lvl = logging::GetMinLogLevel();
  // When |g_min_log_level| is set to LOG_VERBOSE, we expect the verbose form
  // of LocalSurfaceId::ToString().
logging::SetMinLogLevel(logging::LOG_VERBOSE);
EXPECT_TRUE(VLOG_IS_ON(1));
EXPECT_EQ(verbose_expected, local_surface_id.ToString());
EXPECT_EQ(big_verbose_expected, big_local_surface_id.ToString());
EXPECT_EQ(small_verbose_expected, small_local_surface_id.ToString());
  // When |g_min_log_level| is set to LOG_INFO, we expect the abbreviated form
  // of LocalSurfaceId::ToString().
logging::SetMinLogLevel(logging::LOG_INFO);
EXPECT_FALSE(VLOG_IS_ON(1));
EXPECT_EQ(brief_expected, local_surface_id.ToString());
EXPECT_EQ(big_brief_expected, big_local_surface_id.ToString());
EXPECT_EQ(small_brief_expected, small_local_surface_id.ToString());
logging::SetMinLogLevel(previous_log_lvl);
}
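// A hedged sketch (not the actual viz implementation) of the behaviour the
// test above pins down: emit the full token only when verbose logging is on,
// otherwise abbreviate it to its first four characters.
//
//   std::string TokenForLogging(const base::UnguessableToken& token) {
//     const std::string full = token.ToString();
//     return VLOG_IS_ON(1) ? full : full.substr(0, 4) + "...";
//   }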
| null | null | null | null | 14,035 |
12,222 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 177,217 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 |
/*
* Routines common to most mpc85xx-based boards.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/fsl_pm.h>
#include <soc/fsl/qe/qe.h>
#include <sysdev/cpm2_pic.h>
#include "mpc85xx.h"
const struct fsl_pm_ops *qoriq_pm_ops;
static const struct of_device_id mpc85xx_common_ids[] __initconst = {
{ .type = "soc", },
{ .compatible = "soc", },
{ .compatible = "simple-bus", },
{ .name = "cpm", },
{ .name = "localbus", },
{ .compatible = "gianfar", },
{ .compatible = "fsl,qe", },
{ .compatible = "fsl,cpm2", },
{ .compatible = "fsl,srio", },
/* So that the DMA channel nodes can be probed individually: */
{ .compatible = "fsl,eloplus-dma", },
/* For the PMC driver */
{ .compatible = "fsl,mpc8548-guts", },
/* Probably unnecessary? */
{ .compatible = "gpio-leds", },
/* For all PCI controllers */
{ .compatible = "fsl,mpc8540-pci", },
{ .compatible = "fsl,mpc8548-pcie", },
{ .compatible = "fsl,p1022-pcie", },
{ .compatible = "fsl,p1010-pcie", },
{ .compatible = "fsl,p1023-pcie", },
{ .compatible = "fsl,p4080-pcie", },
{ .compatible = "fsl,qoriq-pcie-v2.4", },
{ .compatible = "fsl,qoriq-pcie-v2.3", },
{ .compatible = "fsl,qoriq-pcie-v2.2", },
{ .compatible = "fsl,fman", },
{},
};
int __init mpc85xx_common_publish_devices(void)
{
return of_platform_bus_probe(NULL, mpc85xx_common_ids, NULL);
}
#ifdef CONFIG_CPM2
static void cpm2_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
int cascade_irq;
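	/* Drain every pending CPM2 interrupt before issuing EOI for the
	 * cascade interrupt on the parent controller.
	 */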
while ((cascade_irq = cpm2_get_irq()) >= 0)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
}
void __init mpc85xx_cpm2_pic_init(void)
{
struct device_node *np;
int irq;
/* Setup CPM2 PIC */
np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
if (np == NULL) {
printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
return;
}
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
of_node_put(np);
printk(KERN_ERR "PIC init: got no IRQ for cpm cascade\n");
return;
}
cpm2_pic_init(np);
of_node_put(np);
irq_set_chained_handler(irq, cpm2_cascade);
}
#endif
#ifdef CONFIG_QUICC_ENGINE
void __init mpc85xx_qe_init(void)
{
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "fsl,qe");
if (!np) {
np = of_find_node_by_name(NULL, "qe");
if (!np) {
pr_err("%s: Could not find Quicc Engine node\n",
__func__);
return;
}
}
if (!of_device_is_available(np)) {
of_node_put(np);
return;
}
of_node_put(np);
}
void __init mpc85xx_qe_par_io_init(void)
{
struct device_node *np;
np = of_find_node_by_name(NULL, "par_io");
if (np) {
struct device_node *ucc;
par_io_init(np);
of_node_put(np);
for_each_node_by_name(ucc, "ucc")
par_io_of_config(ucc);
}
}
#endif
| null | null | null | null | 85,564 |
48,665 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 48,665 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 |
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/views/animation/square_ink_drop_ripple.h"
#include <memory>
#include "base/macros.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gfx/geometry/point.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/geometry/size_f.h"
#include "ui/views/animation/ink_drop_ripple_observer.h"
#include "ui/views/animation/ink_drop_state.h"
#include "ui/views/animation/test/square_ink_drop_ripple_test_api.h"
#include "ui/views/animation/test/test_ink_drop_ripple_observer.h"
#include "ui/views/test/widget_test.h"
namespace views {
namespace test {
namespace {
using PaintedShape = views::test::SquareInkDropRippleTestApi::PaintedShape;
// Transforms a copy of |point| with |transform| and returns it.
gfx::Point TransformPoint(const gfx::Transform& transform,
const gfx::Point& point) {
gfx::Point transformed_point = point;
transform.TransformPoint(&transformed_point);
return transformed_point;
}
class SquareInkDropRippleCalculateTransformsTest : public WidgetTest {
public:
SquareInkDropRippleCalculateTransformsTest();
~SquareInkDropRippleCalculateTransformsTest() override;
protected:
// Half the width/height of the drawn ink drop.
static const int kHalfDrawnSize;
// The full width/height of the drawn ink drop.
static const int kDrawnSize;
// The radius of the rounded rectangle corners.
static const int kTransformedRadius;
// Half the width/height of the transformed ink drop.
static const int kHalfTransformedSize;
// The full width/height of the transformed ink drop.
static const int kTransformedSize;
// Constant points in the drawn space that will be transformed.
static const gfx::Point kDrawnCenterPoint;
static const gfx::Point kDrawnMidLeftPoint;
static const gfx::Point kDrawnMidRightPoint;
static const gfx::Point kDrawnTopMidPoint;
static const gfx::Point kDrawnBottomMidPoint;
// The test target.
SquareInkDropRipple ink_drop_ripple_;
// Provides internal access to the test target.
SquareInkDropRippleTestApi test_api_;
// The gfx::Transforms collection that is populated via the
// Calculate*Transforms() calls.
SquareInkDropRippleTestApi::InkDropTransforms transforms_;
private:
DISALLOW_COPY_AND_ASSIGN(SquareInkDropRippleCalculateTransformsTest);
};
const int SquareInkDropRippleCalculateTransformsTest::kHalfDrawnSize = 5;
const int SquareInkDropRippleCalculateTransformsTest::kDrawnSize =
2 * kHalfDrawnSize;
const int SquareInkDropRippleCalculateTransformsTest::kTransformedRadius = 10;
const int SquareInkDropRippleCalculateTransformsTest::kHalfTransformedSize =
100;
const int SquareInkDropRippleCalculateTransformsTest::kTransformedSize =
2 * kHalfTransformedSize;
const gfx::Point SquareInkDropRippleCalculateTransformsTest::kDrawnCenterPoint =
gfx::Point(kHalfDrawnSize, kHalfDrawnSize);
const gfx::Point
SquareInkDropRippleCalculateTransformsTest::kDrawnMidLeftPoint =
gfx::Point(0, kHalfDrawnSize);
const gfx::Point
SquareInkDropRippleCalculateTransformsTest::kDrawnMidRightPoint =
gfx::Point(kDrawnSize, kHalfDrawnSize);
const gfx::Point SquareInkDropRippleCalculateTransformsTest::kDrawnTopMidPoint =
gfx::Point(kHalfDrawnSize, 0);
const gfx::Point
SquareInkDropRippleCalculateTransformsTest::kDrawnBottomMidPoint =
gfx::Point(kHalfDrawnSize, kDrawnSize);
SquareInkDropRippleCalculateTransformsTest::
SquareInkDropRippleCalculateTransformsTest()
: ink_drop_ripple_(gfx::Size(kDrawnSize, kDrawnSize),
2,
gfx::Size(kHalfDrawnSize, kHalfDrawnSize),
1,
gfx::Point(),
SK_ColorBLACK,
0.175f),
test_api_(&ink_drop_ripple_) {}
SquareInkDropRippleCalculateTransformsTest::
~SquareInkDropRippleCalculateTransformsTest() {}
} // namespace
TEST_F(SquareInkDropRippleCalculateTransformsTest,
TransformedPointsUsingTransformsFromCalculateCircleTransforms) {
test_api_.CalculateCircleTransforms(
gfx::Size(kTransformedSize, kTransformedSize), &transforms_);
struct {
PaintedShape shape;
gfx::Point center_point;
gfx::Point mid_left_point;
gfx::Point mid_right_point;
gfx::Point top_mid_point;
gfx::Point bottom_mid_point;
} test_cases[] = {
{PaintedShape::TOP_LEFT_CIRCLE, gfx::Point(0, 0),
gfx::Point(-kHalfTransformedSize, 0),
gfx::Point(kHalfTransformedSize, 0),
gfx::Point(0, -kHalfTransformedSize),
gfx::Point(0, kHalfTransformedSize)},
{PaintedShape::TOP_RIGHT_CIRCLE, gfx::Point(0, 0),
gfx::Point(-kHalfTransformedSize, 0),
gfx::Point(kHalfTransformedSize, 0),
gfx::Point(0, -kHalfTransformedSize),
gfx::Point(0, kHalfTransformedSize)},
{PaintedShape::BOTTOM_RIGHT_CIRCLE, gfx::Point(0, 0),
gfx::Point(-kHalfTransformedSize, 0),
gfx::Point(kHalfTransformedSize, 0),
gfx::Point(0, -kHalfTransformedSize),
gfx::Point(0, kHalfTransformedSize)},
{PaintedShape::BOTTOM_LEFT_CIRCLE, gfx::Point(0, 0),
gfx::Point(-kHalfTransformedSize, 0),
gfx::Point(kHalfTransformedSize, 0),
gfx::Point(0, -kHalfTransformedSize),
gfx::Point(0, kHalfTransformedSize)},
{PaintedShape::HORIZONTAL_RECT, gfx::Point(0, 0),
gfx::Point(-kHalfTransformedSize, 0),
gfx::Point(kHalfTransformedSize, 0), gfx::Point(0, 0), gfx::Point(0, 0)},
{PaintedShape::VERTICAL_RECT, gfx::Point(0, 0), gfx::Point(0, 0),
gfx::Point(0, 0), gfx::Point(0, -kHalfTransformedSize),
gfx::Point(0, kHalfTransformedSize)}};
for (size_t i = 0; i < arraysize(test_cases); ++i) {
PaintedShape shape = test_cases[i].shape;
SCOPED_TRACE(testing::Message() << "i=" << i << " shape=" << shape);
gfx::Transform transform = transforms_[shape];
EXPECT_EQ(test_cases[i].center_point,
TransformPoint(transform, kDrawnCenterPoint));
EXPECT_EQ(test_cases[i].mid_left_point,
TransformPoint(transform, kDrawnMidLeftPoint));
EXPECT_EQ(test_cases[i].mid_right_point,
TransformPoint(transform, kDrawnMidRightPoint));
EXPECT_EQ(test_cases[i].top_mid_point,
TransformPoint(transform, kDrawnTopMidPoint));
EXPECT_EQ(test_cases[i].bottom_mid_point,
TransformPoint(transform, kDrawnBottomMidPoint));
}
}
TEST_F(SquareInkDropRippleCalculateTransformsTest,
TransformedPointsUsingTransformsFromCalculateRectTransforms) {
test_api_.CalculateRectTransforms(
gfx::Size(kTransformedSize, kTransformedSize), kTransformedRadius,
&transforms_);
const int x_offset = kHalfTransformedSize - kTransformedRadius;
const int y_offset = kHalfTransformedSize - kTransformedRadius;
struct {
PaintedShape shape;
gfx::Point center_point;
gfx::Point mid_left_point;
gfx::Point mid_right_point;
gfx::Point top_mid_point;
gfx::Point bottom_mid_point;
} test_cases[] = {
{PaintedShape::TOP_LEFT_CIRCLE, gfx::Point(-x_offset, -y_offset),
gfx::Point(-kHalfTransformedSize, -y_offset),
gfx::Point(-x_offset + kTransformedRadius, -y_offset),
gfx::Point(-x_offset, -kHalfTransformedSize),
gfx::Point(-x_offset, -y_offset + kTransformedRadius)},
{PaintedShape::TOP_RIGHT_CIRCLE, gfx::Point(x_offset, -y_offset),
gfx::Point(x_offset - kTransformedRadius, -y_offset),
gfx::Point(kHalfTransformedSize, -y_offset),
gfx::Point(x_offset, -kHalfTransformedSize),
gfx::Point(x_offset, -y_offset + kTransformedRadius)},
{PaintedShape::BOTTOM_RIGHT_CIRCLE, gfx::Point(x_offset, y_offset),
gfx::Point(x_offset - kTransformedRadius, y_offset),
gfx::Point(kHalfTransformedSize, y_offset),
gfx::Point(x_offset, y_offset - kTransformedRadius),
gfx::Point(x_offset, kHalfTransformedSize)},
{PaintedShape::BOTTOM_LEFT_CIRCLE, gfx::Point(-x_offset, y_offset),
gfx::Point(-kHalfTransformedSize, y_offset),
gfx::Point(-x_offset + kTransformedRadius, y_offset),
gfx::Point(-x_offset, y_offset - kTransformedRadius),
gfx::Point(-x_offset, kHalfTransformedSize)},
{PaintedShape::HORIZONTAL_RECT, gfx::Point(0, 0),
gfx::Point(-kHalfTransformedSize, 0),
gfx::Point(kHalfTransformedSize, 0), gfx::Point(0, -y_offset),
gfx::Point(0, y_offset)},
{PaintedShape::VERTICAL_RECT, gfx::Point(0, 0), gfx::Point(-x_offset, 0),
gfx::Point(x_offset, 0), gfx::Point(0, -kHalfTransformedSize),
gfx::Point(0, kHalfTransformedSize)}};
for (size_t i = 0; i < arraysize(test_cases); ++i) {
PaintedShape shape = test_cases[i].shape;
SCOPED_TRACE(testing::Message() << "i=" << i << " shape=" << shape);
gfx::Transform transform = transforms_[shape];
EXPECT_EQ(test_cases[i].center_point,
TransformPoint(transform, kDrawnCenterPoint));
EXPECT_EQ(test_cases[i].mid_left_point,
TransformPoint(transform, kDrawnMidLeftPoint));
EXPECT_EQ(test_cases[i].mid_right_point,
TransformPoint(transform, kDrawnMidRightPoint));
EXPECT_EQ(test_cases[i].top_mid_point,
TransformPoint(transform, kDrawnTopMidPoint));
EXPECT_EQ(test_cases[i].bottom_mid_point,
TransformPoint(transform, kDrawnBottomMidPoint));
}
}
TEST_F(SquareInkDropRippleCalculateTransformsTest, RippleIsPixelAligned) {
// Create a ripple that would not naturally be pixel aligned at a fractional
// scale factor.
const gfx::Point center(14, 14);
const gfx::Rect drawn_rect_bounds(0, 0, 10, 10);
SquareInkDropRipple ink_drop_ripple(drawn_rect_bounds.size(), 2,
gfx::Size(1, 1), // unimportant
1, center, SK_ColorBLACK, 0.175f);
SquareInkDropRippleTestApi test_api(&ink_drop_ripple);
// Add to a widget so we can control the DSF.
auto* widget = CreateTopLevelPlatformWidget();
widget->SetBounds(gfx::Rect(0, 0, 100, 100));
auto* host_view = new View();
host_view->SetPaintToLayer();
widget->GetContentsView()->AddChildView(host_view);
host_view->layer()->Add(ink_drop_ripple.GetRootLayer());
// Test a variety of scale factors and target transform sizes.
std::vector<float> dsfs({1.0f, 1.25f, 1.5f, 2.0f, 3.0f});
std::vector<int> target_sizes({5, 7, 11, 13, 31});
for (float dsf : dsfs) {
for (int target_size : target_sizes) {
SCOPED_TRACE(testing::Message()
<< "target_size=" << target_size << " dsf=" << dsf);
host_view->layer()->GetCompositor()->SetScaleAndSize(
dsf, gfx::Size(100, 100), viz::LocalSurfaceId());
SquareInkDropRippleTestApi::InkDropTransforms transforms;
test_api.CalculateRectTransforms(gfx::Size(target_size, target_size), 0,
&transforms);
// Checks that a rectangle is integer-aligned modulo floating point error.
auto verify_bounds = [](const gfx::RectF& rect) {
float float_min_x = rect.x();
float float_min_y = rect.y();
float float_max_x = rect.right();
float float_max_y = rect.bottom();
int min_x = gfx::ToRoundedInt(float_min_x);
int min_y = gfx::ToRoundedInt(float_min_y);
int max_x = gfx::ToRoundedInt(float_max_x);
int max_y = gfx::ToRoundedInt(float_max_y);
EXPECT_LT(std::abs(min_x - float_min_x), 0.01f);
EXPECT_LT(std::abs(min_y - float_min_y), 0.01f);
EXPECT_LT(std::abs(max_x - float_max_x), 0.01f);
EXPECT_LT(std::abs(max_y - float_max_y), 0.01f);
};
// When you feed in the bounds of the rectangle layer delegate, no matter
// what the target size was you should get an integer aligned bounding
// box.
gfx::Transform transform = transforms[PaintedShape::HORIZONTAL_RECT];
gfx::RectF horizontal_rect(drawn_rect_bounds);
transform.TransformRect(&horizontal_rect);
horizontal_rect.Scale(dsf);
verify_bounds(horizontal_rect);
transform = transforms[PaintedShape::VERTICAL_RECT];
gfx::RectF vertical_rect(drawn_rect_bounds);
transform.TransformRect(&vertical_rect);
vertical_rect.Scale(dsf);
verify_bounds(vertical_rect);
}
}
widget->CloseNow();
}
} // namespace test
} // namespace views
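// A hedged sketch (toy helper, not part of the test above, assuming <cmath>)
// of the pixel-alignment check that the verify_bounds lambda performs: an
// edge is considered aligned when scaling by the device scale factor lands
// within a small epsilon of an integer.
//
//   bool IsPixelAligned(float edge_in_dip, float dsf, float epsilon = 0.01f) {
//     const float edge_in_px = edge_in_dip * dsf;
//     return std::abs(std::round(edge_in_px) - edge_in_px) < epsilon;
//   }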
| null | null | null | null | 45,528 |