The preview's column list, reconstructed as a table (type plus the observed value range, string-length range, or number of distinct classes):

| Column | Type | Observed range / classes |
|---|---|---|
| file_name | int64 | 0-72.3k |
| vulnerable_line_numbers | string | lengths 1-1.06k |
| dataset_type | string (categorical) | 1 distinct value |
| commit_hash | string | lengths 40-44 |
| unique_id | int64 | 0-271k |
| project | string (categorical) | 10 distinct values |
| target | int64 | 0-1 |
| repo_url | string (categorical) | 10 distinct values |
| date | string | length 25 |
| code | string | lengths 0-20.4M |
| CVE | string | lengths 13-43 |
| CWE | string (categorical) | 50 distinct values |
| commit_link | string | lengths 73-97 |
| severity | string (categorical) | 4 distinct values |
| __index_level_0__ | int64 | 0-124k |
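A minimal sketch of how rows with this schema could be loaded and filtered with the Hugging Face `datasets` library. The hub identifier `example-org/vuln-code` and the split name are placeholders; the preview does not state them.

```python
# A minimal sketch, not the dataset's documented API. The hub ID
# "example-org/vuln-code" and the split name are placeholders; the
# preview above does not name them.
from datasets import load_dataset

ds = load_dataset("example-org/vuln-code", split="train")

# Per the schema, `target` is an int64 in {0, 1}; all rows previewed
# below happen to have target == 0 (not vulnerable).
vulnerable = ds.filter(lambda row: row["target"] == 1)

# `project` is one of 10 classes (linux, Chrome, qemu, ImageMagick, ...).
linux_rows = vulnerable.filter(lambda row: row["project"] == "linux")

print(len(linux_rows))
print(linux_rows[0]["CVE"], linux_rows[0]["CWE"], linux_rows[0]["severity"])
```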
Row 1:
- file_name: 37,692
- vulnerable_line_numbers: null
- dataset_type: train_val
- commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
- unique_id: 202,687
- project: linux
- target: 0
- repo_url: https://github.com/torvalds/linux
- date: 2017-05-12 08:32:58+10:00
- code:

```c
#include "util/util.h"
#include "builtin.h"
#include "perf.h"

int cmd_version(int argc __maybe_unused, const char **argv __maybe_unused,
		const char *prefix __maybe_unused)
{
	printf("perf version %s\n", perf_version_string);
	return 0;
}
```
- CVE: null
- CWE: null
- commit_link: null
- severity: null
- __index_level_0__: 111,034
Row 2:
- file_name: 20,512
- vulnerable_line_numbers: null
- dataset_type: train_val
- commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
- unique_id: 185,507
- project: linux
- target: 0
- repo_url: https://github.com/torvalds/linux
- date: 2017-05-12 08:32:58+10:00
- code:

```c
/* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in * the file called "COPYING". * * Contact Information: * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 */ #include "fm10k_tlv.h" /** * fm10k_tlv_msg_init - Initialize message block for TLV data storage * @msg: Pointer to message block * @msg_id: Message ID indicating message type * * This function return success if provided with a valid message pointer **/ s32 fm10k_tlv_msg_init(u32 *msg, u16 msg_id) { /* verify pointer is not NULL */ if (!msg) return FM10K_ERR_PARAM; *msg = (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT) | msg_id; return 0; } /** * fm10k_tlv_attr_put_null_string - Place null terminated string on message * @msg: Pointer to message block * @attr_id: Attribute ID * @string: Pointer to string to be stored in attribute * * This function will reorder a string to be CPU endian and store it in * the attribute buffer. It will return success if provided with a valid * pointers. **/ static s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id, const unsigned char *string) { u32 attr_data = 0, len = 0; u32 *attr; /* verify pointers are not NULL */ if (!string || !msg) return FM10K_ERR_PARAM; attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; /* copy string into local variable and then write to msg */ do { /* write data to message */ if (len && !(len % 4)) { attr[len / 4] = attr_data; attr_data = 0; } /* record character to offset location */ attr_data |= (u32)(*string) << (8 * (len % 4)); len++; /* test for NULL and then increment */ } while (*(string++)); /* write last piece of data to message */ attr[(len + 3) / 4] = attr_data; /* record attribute header, update message length */ len <<= FM10K_TLV_LEN_SHIFT; attr[0] = len | attr_id; /* add header length to length */ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; *msg += FM10K_TLV_LEN_ALIGN(len); return 0; } /** * fm10k_tlv_attr_get_null_string - Get null terminated string from attribute * @attr: Pointer to attribute * @string: Pointer to location of destination string * * This function pulls the string back out of the attribute and will place * it in the array pointed by by string. It will return success if provided * with a valid pointers. **/ static s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string) { u32 len; /* verify pointers are not NULL */ if (!string || !attr) return FM10K_ERR_PARAM; len = *attr >> FM10K_TLV_LEN_SHIFT; attr++; while (len--) string[len] = (u8)(attr[len / 4] >> (8 * (len % 4))); return 0; } /** * fm10k_tlv_attr_put_mac_vlan - Store MAC/VLAN attribute in message * @msg: Pointer to message block * @attr_id: Attribute ID * @mac_addr: MAC address to be stored * * This function will reorder a MAC address to be CPU endian and store it * in the attribute buffer. It will return success if provided with a * valid pointers. 
**/ s32 fm10k_tlv_attr_put_mac_vlan(u32 *msg, u16 attr_id, const u8 *mac_addr, u16 vlan) { u32 len = ETH_ALEN << FM10K_TLV_LEN_SHIFT; u32 *attr; /* verify pointers are not NULL */ if (!msg || !mac_addr) return FM10K_ERR_PARAM; attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; /* record attribute header, update message length */ attr[0] = len | attr_id; /* copy value into local variable and then write to msg */ attr[1] = le32_to_cpu(*(const __le32 *)&mac_addr[0]); attr[2] = le16_to_cpu(*(const __le16 *)&mac_addr[4]); attr[2] |= (u32)vlan << 16; /* add header length to length */ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; *msg += FM10K_TLV_LEN_ALIGN(len); return 0; } /** * fm10k_tlv_attr_get_mac_vlan - Get MAC/VLAN stored in attribute * @attr: Pointer to attribute * @attr_id: Attribute ID * @mac_addr: location of buffer to store MAC address * * This function pulls the MAC address back out of the attribute and will * place it in the array pointed by by mac_addr. It will return success * if provided with a valid pointers. **/ s32 fm10k_tlv_attr_get_mac_vlan(u32 *attr, u8 *mac_addr, u16 *vlan) { /* verify pointers are not NULL */ if (!mac_addr || !attr) return FM10K_ERR_PARAM; *(__le32 *)&mac_addr[0] = cpu_to_le32(attr[1]); *(__le16 *)&mac_addr[4] = cpu_to_le16((u16)(attr[2])); *vlan = (u16)(attr[2] >> 16); return 0; } /** * fm10k_tlv_attr_put_bool - Add header indicating value "true" * @msg: Pointer to message block * @attr_id: Attribute ID * * This function will simply add an attribute header, the fact * that the header is here means the attribute value is true, else * it is false. The function will return success if provided with a * valid pointers. **/ s32 fm10k_tlv_attr_put_bool(u32 *msg, u16 attr_id) { /* verify pointers are not NULL */ if (!msg) return FM10K_ERR_PARAM; /* record attribute header */ msg[FM10K_TLV_DWORD_LEN(*msg)] = attr_id; /* add header length to length */ *msg += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; return 0; } /** * fm10k_tlv_attr_put_value - Store integer value attribute in message * @msg: Pointer to message block * @attr_id: Attribute ID * @value: Value to be written * @len: Size of value * * This function will place an integer value of up to 8 bytes in size * in a message attribute. The function will return success provided * that msg is a valid pointer, and len is 1, 2, 4, or 8. **/ s32 fm10k_tlv_attr_put_value(u32 *msg, u16 attr_id, s64 value, u32 len) { u32 *attr; /* verify non-null msg and len is 1, 2, 4, or 8 */ if (!msg || !len || len > 8 || (len & (len - 1))) return FM10K_ERR_PARAM; attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; if (len < 4) { attr[1] = (u32)value & (BIT(8 * len) - 1); } else { attr[1] = (u32)value; if (len > 4) attr[2] = (u32)(value >> 32); } /* record attribute header, update message length */ len <<= FM10K_TLV_LEN_SHIFT; attr[0] = len | attr_id; /* add header length to length */ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; *msg += FM10K_TLV_LEN_ALIGN(len); return 0; } /** * fm10k_tlv_attr_get_value - Get integer value stored in attribute * @attr: Pointer to attribute * @value: Pointer to destination buffer * @len: Size of value * * This function will place an integer value of up to 8 bytes in size * in the offset pointed to by value. The function will return success * provided that pointers are valid and the len value matches the * attribute length. 
**/ s32 fm10k_tlv_attr_get_value(u32 *attr, void *value, u32 len) { /* verify pointers are not NULL */ if (!attr || !value) return FM10K_ERR_PARAM; if ((*attr >> FM10K_TLV_LEN_SHIFT) != len) return FM10K_ERR_PARAM; if (len == 8) *(u64 *)value = ((u64)attr[2] << 32) | attr[1]; else if (len == 4) *(u32 *)value = attr[1]; else if (len == 2) *(u16 *)value = (u16)attr[1]; else *(u8 *)value = (u8)attr[1]; return 0; } /** * fm10k_tlv_attr_put_le_struct - Store little endian structure in message * @msg: Pointer to message block * @attr_id: Attribute ID * @le_struct: Pointer to structure to be written * @len: Size of le_struct * * This function will place a little endian structure value in a message * attribute. The function will return success provided that all pointers * are valid and length is a non-zero multiple of 4. **/ s32 fm10k_tlv_attr_put_le_struct(u32 *msg, u16 attr_id, const void *le_struct, u32 len) { const __le32 *le32_ptr = (const __le32 *)le_struct; u32 *attr; u32 i; /* verify non-null msg and len is in 32 bit words */ if (!msg || !len || (len % 4)) return FM10K_ERR_PARAM; attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; /* copy le32 structure into host byte order at 32b boundaries */ for (i = 0; i < (len / 4); i++) attr[i + 1] = le32_to_cpu(le32_ptr[i]); /* record attribute header, update message length */ len <<= FM10K_TLV_LEN_SHIFT; attr[0] = len | attr_id; /* add header length to length */ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; *msg += FM10K_TLV_LEN_ALIGN(len); return 0; } /** * fm10k_tlv_attr_get_le_struct - Get little endian struct form attribute * @attr: Pointer to attribute * @le_struct: Pointer to structure to be written * @len: Size of structure * * This function will place a little endian structure in the buffer * pointed to by le_struct. The function will return success * provided that pointers are valid and the len value matches the * attribute length. **/ s32 fm10k_tlv_attr_get_le_struct(u32 *attr, void *le_struct, u32 len) { __le32 *le32_ptr = (__le32 *)le_struct; u32 i; /* verify pointers are not NULL */ if (!le_struct || !attr) return FM10K_ERR_PARAM; if ((*attr >> FM10K_TLV_LEN_SHIFT) != len) return FM10K_ERR_PARAM; attr++; for (i = 0; len; i++, len -= 4) le32_ptr[i] = cpu_to_le32(attr[i]); return 0; } /** * fm10k_tlv_attr_nest_start - Start a set of nested attributes * @msg: Pointer to message block * @attr_id: Attribute ID * * This function will mark off a new nested region for encapsulating * a given set of attributes. The idea is if you wish to place a secondary * structure within the message this mechanism allows for that. The * function will return NULL on failure, and a pointer to the start * of the nested attributes on success. **/ static u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) { u32 *attr; /* verify pointer is not NULL */ if (!msg) return NULL; attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; attr[0] = attr_id; /* return pointer to nest header */ return attr; } /** * fm10k_tlv_attr_nest_stop - Stop a set of nested attributes * @msg: Pointer to message block * * This function closes off an existing set of nested attributes. The * message pointer should be pointing to the parent of the nest. So in * the case of a nest within the nest this would be the outer nest pointer. * This function will return success provided all pointers are valid. 
**/ static s32 fm10k_tlv_attr_nest_stop(u32 *msg) { u32 *attr; u32 len; /* verify pointer is not NULL */ if (!msg) return FM10K_ERR_PARAM; /* locate the nested header and retrieve its length */ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; len = (attr[0] >> FM10K_TLV_LEN_SHIFT) << FM10K_TLV_LEN_SHIFT; /* only include nest if data was added to it */ if (len) { len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; *msg += len; } return 0; } /** * fm10k_tlv_attr_validate - Validate attribute metadata * @attr: Pointer to attribute * @tlv_attr: Type and length info for attribute * * This function does some basic validation of the input TLV. It * verifies the length, and in the case of null terminated strings * it verifies that the last byte is null. The function will * return FM10K_ERR_PARAM if any attribute is malformed, otherwise * it returns 0. **/ static s32 fm10k_tlv_attr_validate(u32 *attr, const struct fm10k_tlv_attr *tlv_attr) { u32 attr_id = *attr & FM10K_TLV_ID_MASK; u16 len = *attr >> FM10K_TLV_LEN_SHIFT; /* verify this is an attribute and not a message */ if (*attr & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT)) return FM10K_ERR_PARAM; /* search through the list of attributes to find a matching ID */ while (tlv_attr->id < attr_id) tlv_attr++; /* if didn't find a match then we should exit */ if (tlv_attr->id != attr_id) return FM10K_NOT_IMPLEMENTED; /* move to start of attribute data */ attr++; switch (tlv_attr->type) { case FM10K_TLV_NULL_STRING: if (!len || (attr[(len - 1) / 4] & (0xFF << (8 * ((len - 1) % 4))))) return FM10K_ERR_PARAM; if (len > tlv_attr->len) return FM10K_ERR_PARAM; break; case FM10K_TLV_MAC_ADDR: if (len != ETH_ALEN) return FM10K_ERR_PARAM; break; case FM10K_TLV_BOOL: if (len) return FM10K_ERR_PARAM; break; case FM10K_TLV_UNSIGNED: case FM10K_TLV_SIGNED: if (len != tlv_attr->len) return FM10K_ERR_PARAM; break; case FM10K_TLV_LE_STRUCT: /* struct must be 4 byte aligned */ if ((len % 4) || len != tlv_attr->len) return FM10K_ERR_PARAM; break; case FM10K_TLV_NESTED: /* nested attributes must be 4 byte aligned */ if (len % 4) return FM10K_ERR_PARAM; break; default: /* attribute id is mapped to bad value */ return FM10K_ERR_PARAM; } return 0; } /** * fm10k_tlv_attr_parse - Parses stream of attribute data * @attr: Pointer to attribute list * @results: Pointer array to store pointers to attributes * @tlv_attr: Type and length info for attributes * * This function validates a stream of attributes and parses them * up into an array of pointers stored in results. The function will * return FM10K_ERR_PARAM on any input or message error, * FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array * and 0 on success. Any attributes not found in tlv_attr will be silently * ignored. 
**/ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, const struct fm10k_tlv_attr *tlv_attr) { u32 i, attr_id, offset = 0; s32 err = 0; u16 len; /* verify pointers are not NULL */ if (!attr || !results) return FM10K_ERR_PARAM; /* initialize results to NULL */ for (i = 0; i < FM10K_TLV_RESULTS_MAX; i++) results[i] = NULL; /* pull length from the message header */ len = *attr >> FM10K_TLV_LEN_SHIFT; /* no attributes to parse if there is no length */ if (!len) return 0; /* no attributes to parse, just raw data, message becomes attribute */ if (!tlv_attr) { results[0] = attr; return 0; } /* move to start of attribute data */ attr++; /* run through list parsing all attributes */ while (offset < len) { attr_id = *attr & FM10K_TLV_ID_MASK; if (attr_id >= FM10K_TLV_RESULTS_MAX) return FM10K_NOT_IMPLEMENTED; err = fm10k_tlv_attr_validate(attr, tlv_attr); if (err == FM10K_NOT_IMPLEMENTED) ; /* silently ignore non-implemented attributes */ else if (err) return err; else results[attr_id] = attr; /* update offset */ offset += FM10K_TLV_DWORD_LEN(*attr) * 4; /* move to next attribute */ attr = &attr[FM10K_TLV_DWORD_LEN(*attr)]; } /* we should find ourselves at the end of the list */ if (offset != len) return FM10K_ERR_PARAM; return 0; } /** * fm10k_tlv_msg_parse - Parses message header and calls function handler * @hw: Pointer to hardware structure * @msg: Pointer to message * @mbx: Pointer to mailbox information structure * @func: Function array containing list of message handling functions * * This function should be the first function called upon receiving a * message. The handler will identify the message type and call the correct * handler for the given message. It will return the value from the function * call on a recognized message type, otherwise it will return * FM10K_NOT_IMPLEMENTED on an unrecognized type. **/ s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg, struct fm10k_mbx_info *mbx, const struct fm10k_msg_data *data) { u32 *results[FM10K_TLV_RESULTS_MAX]; u32 msg_id; s32 err; /* verify pointer is not NULL */ if (!msg || !data) return FM10K_ERR_PARAM; /* verify this is a message and not an attribute */ if (!(*msg & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT))) return FM10K_ERR_PARAM; /* grab message ID */ msg_id = *msg & FM10K_TLV_ID_MASK; while (data->id < msg_id) data++; /* if we didn't find it then pass it up as an error */ if (data->id != msg_id) { while (data->id != FM10K_TLV_ERROR) data++; } /* parse the attributes into the results list */ err = fm10k_tlv_attr_parse(msg, results, data->attr); if (err < 0) return err; return data->func(hw, results, mbx); } /** * fm10k_tlv_msg_error - Default handler for unrecognized TLV message IDs * @hw: Pointer to hardware structure * @results: Pointer array to message, results[0] is pointer to message * @mbx: Unused mailbox pointer * * This function is a default handler for unrecognized messages. At a * a minimum it just indicates that the message requested was * unimplemented. 
**/ s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) { return FM10K_NOT_IMPLEMENTED; } static const unsigned char test_str[] = "fm10k"; static const unsigned char test_mac[ETH_ALEN] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc }; static const u16 test_vlan = 0x0FED; static const u64 test_u64 = 0xfedcba9876543210ull; static const u32 test_u32 = 0x87654321; static const u16 test_u16 = 0x8765; static const u8 test_u8 = 0x87; static const s64 test_s64 = -0x123456789abcdef0ll; static const s32 test_s32 = -0x1235678; static const s16 test_s16 = -0x1234; static const s8 test_s8 = -0x12; static const __le32 test_le[2] = { cpu_to_le32(0x12345678), cpu_to_le32(0x9abcdef0)}; /* The message below is meant to be used as a test message to demonstrate * how to use the TLV interface and to test the types. Normally this code * be compiled out by stripping the code wrapped in FM10K_TLV_TEST_MSG */ const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[] = { FM10K_TLV_ATTR_NULL_STRING(FM10K_TEST_MSG_STRING, 80), FM10K_TLV_ATTR_MAC_ADDR(FM10K_TEST_MSG_MAC_ADDR), FM10K_TLV_ATTR_U8(FM10K_TEST_MSG_U8), FM10K_TLV_ATTR_U16(FM10K_TEST_MSG_U16), FM10K_TLV_ATTR_U32(FM10K_TEST_MSG_U32), FM10K_TLV_ATTR_U64(FM10K_TEST_MSG_U64), FM10K_TLV_ATTR_S8(FM10K_TEST_MSG_S8), FM10K_TLV_ATTR_S16(FM10K_TEST_MSG_S16), FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_S32), FM10K_TLV_ATTR_S64(FM10K_TEST_MSG_S64), FM10K_TLV_ATTR_LE_STRUCT(FM10K_TEST_MSG_LE_STRUCT, 8), FM10K_TLV_ATTR_NESTED(FM10K_TEST_MSG_NESTED), FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_RESULT), FM10K_TLV_ATTR_LAST }; /** * fm10k_tlv_msg_test_generate_data - Stuff message with data * @msg: Pointer to message * @attr_flags: List of flags indicating what attributes to add * * This function is meant to load a message buffer with attribute data **/ static void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags) { if (attr_flags & BIT(FM10K_TEST_MSG_STRING)) fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING, test_str); if (attr_flags & BIT(FM10K_TEST_MSG_MAC_ADDR)) fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR, test_mac, test_vlan); if (attr_flags & BIT(FM10K_TEST_MSG_U8)) fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8); if (attr_flags & BIT(FM10K_TEST_MSG_U16)) fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16); if (attr_flags & BIT(FM10K_TEST_MSG_U32)) fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32); if (attr_flags & BIT(FM10K_TEST_MSG_U64)) fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64); if (attr_flags & BIT(FM10K_TEST_MSG_S8)) fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8); if (attr_flags & BIT(FM10K_TEST_MSG_S16)) fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16); if (attr_flags & BIT(FM10K_TEST_MSG_S32)) fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32); if (attr_flags & BIT(FM10K_TEST_MSG_S64)) fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64); if (attr_flags & BIT(FM10K_TEST_MSG_LE_STRUCT)) fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT, test_le, 8); } /** * fm10k_tlv_msg_test_create - Create a test message testing all attributes * @msg: Pointer to message * @attr_flags: List of flags indicating what attributes to add * * This function is meant to load a message buffer with all attribute types * including a nested attribute. 
**/ void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags) { u32 *nest = NULL; fm10k_tlv_msg_init(msg, FM10K_TLV_MSG_ID_TEST); fm10k_tlv_msg_test_generate_data(msg, attr_flags); /* check for nested attributes */ attr_flags >>= FM10K_TEST_MSG_NESTED; if (attr_flags) { nest = fm10k_tlv_attr_nest_start(msg, FM10K_TEST_MSG_NESTED); fm10k_tlv_msg_test_generate_data(nest, attr_flags); fm10k_tlv_attr_nest_stop(msg); } } /** * fm10k_tlv_msg_test - Validate all results on test message receive * @hw: Pointer to hardware structure * @results: Pointer array to attributes in the message * @mbx: Pointer to mailbox information structure * * This function does a check to verify all attributes match what the test * message placed in the message buffer. It is the default handler * for TLV test messages. **/ s32 fm10k_tlv_msg_test(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) { u32 *nest_results[FM10K_TLV_RESULTS_MAX]; unsigned char result_str[80]; unsigned char result_mac[ETH_ALEN]; s32 err = 0; __le32 result_le[2]; u16 result_vlan; u64 result_u64; u32 result_u32; u16 result_u16; u8 result_u8; s64 result_s64; s32 result_s32; s16 result_s16; s8 result_s8; u32 reply[3]; /* retrieve results of a previous test */ if (!!results[FM10K_TEST_MSG_RESULT]) return fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_RESULT], &mbx->test_result); parse_nested: if (!!results[FM10K_TEST_MSG_STRING]) { err = fm10k_tlv_attr_get_null_string( results[FM10K_TEST_MSG_STRING], result_str); if (!err && memcmp(test_str, result_str, sizeof(test_str))) err = FM10K_ERR_INVALID_VALUE; if (err) goto report_result; } if (!!results[FM10K_TEST_MSG_MAC_ADDR]) { err = fm10k_tlv_attr_get_mac_vlan( results[FM10K_TEST_MSG_MAC_ADDR], result_mac, &result_vlan); if (!err && !ether_addr_equal(test_mac, result_mac)) err = FM10K_ERR_INVALID_VALUE; if (!err && test_vlan != result_vlan) err = FM10K_ERR_INVALID_VALUE; if (err) goto report_result; } if (!!results[FM10K_TEST_MSG_U8]) { err = fm10k_tlv_attr_get_u8(results[FM10K_TEST_MSG_U8], &result_u8); if (!err && test_u8 != result_u8) err = FM10K_ERR_INVALID_VALUE; if (err) goto report_result; } if (!!results[FM10K_TEST_MSG_U16]) { err = fm10k_tlv_attr_get_u16(results[FM10K_TEST_MSG_U16], &result_u16); if (!err && test_u16 != result_u16) err = FM10K_ERR_INVALID_VALUE; if (err) goto report_result; } if (!!results[FM10K_TEST_MSG_U32]) { err = fm10k_tlv_attr_get_u32(results[FM10K_TEST_MSG_U32], &result_u32); if (!err && test_u32 != result_u32) err = FM10K_ERR_INVALID_VALUE; if (err) goto report_result; } if (!!results[FM10K_TEST_MSG_U64]) { err = fm10k_tlv_attr_get_u64(results[FM10K_TEST_MSG_U64], &result_u64); if (!err && test_u64 != result_u64) err = FM10K_ERR_INVALID_VALUE; if (err) goto report_result; } if (!!results[FM10K_TEST_MSG_S8]) { err = fm10k_tlv_attr_get_s8(results[FM10K_TEST_MSG_S8], &result_s8); if (!err && test_s8 != result_s8) err = FM10K_ERR_INVALID_VALUE; if (err) goto report_result; } if (!!results[FM10K_TEST_MSG_S16]) { err = fm10k_tlv_attr_get_s16(results[FM10K_TEST_MSG_S16], &result_s16); if (!err && test_s16 != result_s16) err = FM10K_ERR_INVALID_VALUE; if (err) goto report_result; } if (!!results[FM10K_TEST_MSG_S32]) { err = fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_S32], &result_s32); if (!err && test_s32 != result_s32) err = FM10K_ERR_INVALID_VALUE; if (err) goto report_result; } if (!!results[FM10K_TEST_MSG_S64]) { err = fm10k_tlv_attr_get_s64(results[FM10K_TEST_MSG_S64], &result_s64); if (!err && test_s64 != result_s64) err = FM10K_ERR_INVALID_VALUE; if (err) 
goto report_result; } if (!!results[FM10K_TEST_MSG_LE_STRUCT]) { err = fm10k_tlv_attr_get_le_struct( results[FM10K_TEST_MSG_LE_STRUCT], result_le, sizeof(result_le)); if (!err && memcmp(test_le, result_le, sizeof(test_le))) err = FM10K_ERR_INVALID_VALUE; if (err) goto report_result; } if (!!results[FM10K_TEST_MSG_NESTED]) { /* clear any pointers */ memset(nest_results, 0, sizeof(nest_results)); /* parse the nested attributes into the nest results list */ err = fm10k_tlv_attr_parse(results[FM10K_TEST_MSG_NESTED], nest_results, fm10k_tlv_msg_test_attr); if (err) goto report_result; /* loop back through to the start */ results = nest_results; goto parse_nested; } report_result: /* generate reply with test result */ fm10k_tlv_msg_init(reply, FM10K_TLV_MSG_ID_TEST); fm10k_tlv_attr_put_s32(reply, FM10K_TEST_MSG_RESULT, err); /* load onto outgoing mailbox */ return mbx->ops.enqueue_tx(hw, mbx, reply); }
```
- CVE: null
- CWE: null
- commit_link: null
- severity: null
- __index_level_0__: 93,854
Row 3:
- file_name: 13,355
- vulnerable_line_numbers: null
- dataset_type: train_val
- commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
- unique_id: 13,355
- project: Chrome
- target: 0
- repo_url: https://github.com/chromium/chromium
- date: 2018-04-07 23:43:03+00:00
- code:

```cpp
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef COMPONENTS_HANDOFF_PREF_NAMES_IOS_H_
#define COMPONENTS_HANDOFF_PREF_NAMES_IOS_H_

namespace prefs {

extern const char kIosHandoffToOtherDevices[];

}  // namespace prefs

#endif  // COMPONENTS_HANDOFF_PREF_NAMES_IOS_H_
```
- CVE: null
- CWE: null
- commit_link: null
- severity: null
- __index_level_0__: 10,218
Row 4:
- file_name: 504
- vulnerable_line_numbers: null
- dataset_type: train_val
- commit_hash: 1b0d3845b454eaaac0b2064c78926ca4d739a080
- unique_id: 263,072
- project: qemu
- target: 0
- repo_url: https://github.com/bonzini/qemu
- date: 2016-10-18 11:40:27+01:00
- code:

```c
/*
 * QTest testcase for ES1370
 *
 * Copyright (c) 2014 SUSE LINUX Products GmbH
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "libqtest.h"

/* Tests only initialization so far. TODO: Replace with functional tests */
static void nop(void)
{
}

int main(int argc, char **argv)
{
    int ret;

    g_test_init(&argc, &argv, NULL);
    qtest_add_func("/es1370/nop", nop);

    qtest_start("-device ES1370");
    ret = g_test_run();
    qtest_end();

    return ret;
}
```
- CVE: null
- CWE: null
- commit_link: null
- severity: null
- __index_level_0__: 121,196
Row 5:
- file_name: 68,393
- vulnerable_line_numbers: null
- dataset_type: train_val
- commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
- unique_id: 68,393
- project: Chrome
- target: 0
- repo_url: https://github.com/chromium/chromium
- date: 2018-04-07 23:43:03+00:00
- code:

```cpp
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "remoting/host/audio_silence_detector.h"

#include <stdint.h>

#include "base/macros.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace remoting {

namespace {

const int kSamplingRate = 1000;

void TestSilenceDetector(AudioSilenceDetector* target,
                         const int16_t* samples,
                         int samples_count,
                         bool silence_expected) {
  target->Reset(kSamplingRate, 1);
  bool silence_started = false;
  int threshold_length = 0;
  for (int i = 0; i < 3 * kSamplingRate / samples_count; ++i) {
    bool result = target->IsSilence(samples, samples_count);
    if (silence_started) {
      ASSERT_TRUE(result);
    } else if (result) {
      silence_started = true;
      threshold_length = i * samples_count;
    }
  }

  // Check that the silence was detected if it was expected.
  EXPECT_EQ(silence_expected, silence_started);

  if (silence_expected) {
    // Check that silence threshold is between 0.5 and 2 seconds.
    EXPECT_GE(threshold_length, kSamplingRate / 2);
    EXPECT_LE(threshold_length, kSamplingRate * 2);
  }
}

}  // namespace

TEST(AudioSilenceDetectorTest, Silence) {
  const int16_t kSamples[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

  AudioSilenceDetector target(0);
  TestSilenceDetector(&target, kSamples, arraysize(kSamples), true);
}

TEST(AudioSilenceDetectorTest, Sound) {
  const int16_t kSamples[] = {65, 73, 83, 89, 92, -1, 5, 9, 123, 0};

  AudioSilenceDetector target(0);
  TestSilenceDetector(&target, kSamples, arraysize(kSamples), false);
}

TEST(AudioSilenceDetectorTest, Threshold) {
  const int16_t kSamples[] = {0, 0, 0, 0, 1, 0, 0, -1, 0, 0};

  AudioSilenceDetector target1(0);
  TestSilenceDetector(&target1, kSamples, arraysize(kSamples), false);

  AudioSilenceDetector target2(1);
  TestSilenceDetector(&target2, kSamples, arraysize(kSamples), true);
}

}  // namespace remoting
```
- CVE: null
- CWE: null
- commit_link: null
- severity: null
- __index_level_0__: 65,256
Row 6:
- file_name: 1,970
- vulnerable_line_numbers: null
- dataset_type: train_val
- commit_hash: 1b0d3845b454eaaac0b2064c78926ca4d739a080
- unique_id: 264,538
- project: qemu
- target: 0
- repo_url: https://github.com/bonzini/qemu
- date: 2016-10-18 11:40:27+01:00
- code:

```c
/*
 * TI OMAP2 32kHz sync timer emulation.
 *
 * Copyright (C) 2007-2008 Nokia Corporation
 * Written by Andrzej Zaborowski <andrew@openedhand.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) any later version of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "qemu/timer.h"
#include "hw/arm/omap.h"

struct omap_synctimer_s {
    MemoryRegion iomem;
    uint32_t val;
    uint16_t readh;
};

/* 32-kHz Sync Timer of the OMAP2 */
static uint32_t omap_synctimer_read(struct omap_synctimer_s *s)
{
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 0x8000,
                    NANOSECONDS_PER_SECOND);
}

void omap_synctimer_reset(struct omap_synctimer_s *s)
{
    s->val = omap_synctimer_read(s);
}

static uint32_t omap_synctimer_readw(void *opaque, hwaddr addr)
{
    struct omap_synctimer_s *s = (struct omap_synctimer_s *) opaque;

    switch (addr) {
    case 0x00: /* 32KSYNCNT_REV */
        return 0x21;

    case 0x10: /* CR */
        return omap_synctimer_read(s) - s->val;
    }

    OMAP_BAD_REG(addr);
    return 0;
}

static uint32_t omap_synctimer_readh(void *opaque, hwaddr addr)
{
    struct omap_synctimer_s *s = (struct omap_synctimer_s *) opaque;
    uint32_t ret;

    if (addr & 2)
        return s->readh;
    else {
        ret = omap_synctimer_readw(opaque, addr);
        s->readh = ret >> 16;
        return ret & 0xffff;
    }
}

static void omap_synctimer_write(void *opaque, hwaddr addr,
                                 uint32_t value)
{
    OMAP_BAD_REG(addr);
}

static const MemoryRegionOps omap_synctimer_ops = {
    .old_mmio = {
        .read = {
            omap_badwidth_read32,
            omap_synctimer_readh,
            omap_synctimer_readw,
        },
        .write = {
            omap_badwidth_write32,
            omap_synctimer_write,
            omap_synctimer_write,
        },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

struct omap_synctimer_s *omap_synctimer_init(struct omap_target_agent_s *ta,
                struct omap_mpu_state_s *mpu, omap_clk fclk, omap_clk iclk)
{
    struct omap_synctimer_s *s = g_malloc0(sizeof(*s));

    omap_synctimer_reset(s);
    memory_region_init_io(&s->iomem, NULL, &omap_synctimer_ops, s,
                          "omap.synctimer", omap_l4_region_size(ta, 0));
    omap_l4_attach(ta, 0, &s->iomem);

    return s;
}
```
- CVE: null
- CWE: null
- commit_link: null
- severity: null
- __index_level_0__: 122,662
Row 7:
- file_name: 574
- vulnerable_line_numbers: null
- dataset_type: train_val
- commit_hash: a6802e21d824e786d1e2a8440cf749a6e1a8d95f
- unique_id: 160,702
- project: ImageMagick
- target: 0
- repo_url: https://github.com/ImageMagick/ImageMagick
- date: 2017-07-18 18:28:29-04:00
- code:

```cpp
// This may look like C code, but it is really -*- C++ -*-
//
// Copyright Bob Friesenhahn, 1999, 2000, 2001, 2002
// Copyright Dirk Lemstra 2014
//
// Representation of a pixel view.
//

#if !defined(Magick_Pixels_header)
#define Magick_Pixels_header

#include "Magick++/Include.h"
#include "Magick++/Color.h"
#include "Magick++/Image.h"

namespace Magick
{
  class MagickPPExport Pixels
  {
  public:

    // Construct pixel view using specified image.
    Pixels(Magick::Image &image_);

    // Destroy pixel view
    ~Pixels(void);

    // Transfer pixels from the image to the pixel view as defined by
    // the specified region. Modified pixels may be subsequently
    // transferred back to the image via sync.
    Quantum *get(const ::ssize_t x_,const ::ssize_t y_,
      const size_t columns_,const size_t rows_);

    // Transfer read-only pixels from the image to the pixel view as
    // defined by the specified region.
    const Quantum *getConst(const ::ssize_t x_,const ::ssize_t y_,
      const size_t columns_,const size_t rows_);

    // Return pixel metacontent
    void *metacontent(void);

    // Returns the offset for the specified channel.
    ssize_t offset(PixelChannel channel) const;

    // Allocate a pixel view region to store image pixels as defined
    // by the region rectangle. This area is subsequently transferred
    // from the pixel view to the image via sync.
    Quantum *set(const ::ssize_t x_,const ::ssize_t y_,
      const size_t columns_,const size_t rows_);

    // Transfers the image view pixels to the image.
    void sync(void);

    // Left ordinate of view
    ::ssize_t x(void) const;

    // Top ordinate of view
    ::ssize_t y(void) const;

    // Width of view
    size_t columns(void) const;

    // Height of view
    size_t rows(void) const;

  private:

    // Copying and assigning Pixels is not supported.
    Pixels(const Pixels& pixels_);
    const Pixels& operator=(const Pixels& pixels_);

    Magick::Image _image;         // Image reference
    MagickCore::CacheView *_view; // Image view handle
    ::ssize_t _x;                 // Left ordinate of view
    ::ssize_t _y;                 // Top ordinate of view
    size_t _columns;              // Width of view
    size_t _rows;                 // Height of view

  }; // class Pixels

  class MagickPPExport PixelData
  {
  public:

    // Construct pixel data using specified image
    PixelData(Magick::Image &image_,std::string map_,const StorageType type_);

    // Construct pixel data using specified image
    PixelData(Magick::Image &image_,const ::ssize_t x_,const ::ssize_t y_,
      const size_t width_,const size_t height_,std::string map_,
      const StorageType type_);

    // Destroy pixel data
    ~PixelData(void);

    // Pixel data buffer
    const void *data(void) const;

    // Length of the buffer
    ::ssize_t length(void) const;

    // Size of the buffer in bytes
    ::ssize_t size(void) const;

  private:

    // Copying and assigning PixelData is not supported
    PixelData(const PixelData& pixels_);
    const PixelData& operator=(const PixelData& pixels_);

    void init(Magick::Image &image_,const ::ssize_t x_,const ::ssize_t y_,
      const size_t width_,const size_t height_,std::string map_,
      const StorageType type_);
    void relinquish(void) throw();

    void *_data;       // The pixel data
    ::ssize_t _length; // Length of the data
    ::ssize_t _size;   // Size of the data

  }; // class PixelData

} // Magick namespace

//
// Inline methods
//

// Left ordinate of view
inline ::ssize_t Magick::Pixels::x(void) const
{
  return _x;
}

// Top ordinate of view
inline ::ssize_t Magick::Pixels::y(void) const
{
  return _y;
}

// Width of view
inline size_t Magick::Pixels::columns(void) const
{
  return _columns;
}

// Height of view
inline size_t Magick::Pixels::rows(void) const
{
  return _rows;
}

#endif // Magick_Pixels_header
```
- CVE: null
- CWE: null
- commit_link: null
- severity: null
- __index_level_0__: 72,995
Row 8:
- file_name: 26,114
- vulnerable_line_numbers: null
- dataset_type: train_val
- commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
- unique_id: 191,109
- project: linux
- target: 0
- repo_url: https://github.com/torvalds/linux
- date: 2017-05-12 08:32:58+10:00
- code:

```c
/* * Copyright 2014 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Christian König <christian.koenig@amd.com> */ #include <linux/firmware.h> #include <drm/drmP.h> #include "amdgpu.h" #include "amdgpu_uvd.h" #include "vid.h" #include "uvd/uvd_5_0_d.h" #include "uvd/uvd_5_0_sh_mask.h" #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" #include "bif/bif_5_0_d.h" #include "vi.h" #include "smu/smu_7_1_2_d.h" #include "smu/smu_7_1_2_sh_mask.h" static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev); static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev); static int uvd_v5_0_start(struct amdgpu_device *adev); static void uvd_v5_0_stop(struct amdgpu_device *adev); static int uvd_v5_0_set_clockgating_state(void *handle, enum amd_clockgating_state state); static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev, bool enable); /** * uvd_v5_0_ring_get_rptr - get read pointer * * @ring: amdgpu_ring pointer * * Returns the current hardware read pointer */ static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; return RREG32(mmUVD_RBC_RB_RPTR); } /** * uvd_v5_0_ring_get_wptr - get write pointer * * @ring: amdgpu_ring pointer * * Returns the current hardware write pointer */ static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; return RREG32(mmUVD_RBC_RB_WPTR); } /** * uvd_v5_0_ring_set_wptr - set write pointer * * @ring: amdgpu_ring pointer * * Commits the write pointer to the hardware */ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); } static int uvd_v5_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; uvd_v5_0_set_ring_funcs(adev); uvd_v5_0_set_irq_funcs(adev); return 0; } static int uvd_v5_0_sw_init(void *handle) { struct amdgpu_ring *ring; struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; /* UVD TRAP */ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); if (r) return r; r = amdgpu_uvd_sw_init(adev); if (r) return r; r = amdgpu_uvd_resume(adev); if (r) return r; ring = &adev->uvd.ring; sprintf(ring->name, "uvd"); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); return r; } static int uvd_v5_0_sw_fini(void *handle) { int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_uvd_suspend(adev); if (r) return r; r = 
amdgpu_uvd_sw_fini(adev); if (r) return r; return r; } /** * uvd_v5_0_hw_init - start and test UVD block * * @adev: amdgpu_device pointer * * Initialize the hardware, boot up the VCPU and do some testing */ static int uvd_v5_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->uvd.ring; uint32_t tmp; int r; amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); uvd_v5_0_enable_mgcg(adev, true); ring->ready = true; r = amdgpu_ring_test_ring(ring); if (r) { ring->ready = false; goto done; } r = amdgpu_ring_alloc(ring, 10); if (r) { DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); goto done; } tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0); amdgpu_ring_write(ring, tmp); amdgpu_ring_write(ring, 0xFFFFF); tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0); amdgpu_ring_write(ring, tmp); amdgpu_ring_write(ring, 0xFFFFF); tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0); amdgpu_ring_write(ring, tmp); amdgpu_ring_write(ring, 0xFFFFF); /* Clear timeout status bits */ amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0)); amdgpu_ring_write(ring, 0x8); amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); amdgpu_ring_write(ring, 3); amdgpu_ring_commit(ring); done: if (!r) DRM_INFO("UVD initialized successfully.\n"); return r; } /** * uvd_v5_0_hw_fini - stop the hardware block * * @adev: amdgpu_device pointer * * Stop the UVD block, mark ring as not ready any more */ static int uvd_v5_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->uvd.ring; if (RREG32(mmUVD_STATUS) != 0) uvd_v5_0_stop(adev); ring->ready = false; return 0; } static int uvd_v5_0_suspend(void *handle) { int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = uvd_v5_0_hw_fini(adev); if (r) return r; uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE); r = amdgpu_uvd_suspend(adev); if (r) return r; return r; } static int uvd_v5_0_resume(void *handle) { int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_uvd_resume(adev); if (r) return r; r = uvd_v5_0_hw_init(adev); if (r) return r; return r; } /** * uvd_v5_0_mc_resume - memory controller programming * * @adev: amdgpu_device pointer * * Let the UVD memory controller know it's offsets */ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) { uint64_t offset; uint32_t size; /* programm memory controller bits 0-27 */ WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, lower_32_bits(adev->uvd.gpu_addr)); WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, upper_32_bits(adev->uvd.gpu_addr)); offset = AMDGPU_UVD_FIRMWARE_OFFSET; size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3); WREG32(mmUVD_VCPU_CACHE_SIZE0, size); offset += size; size = AMDGPU_UVD_HEAP_SIZE; WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3); WREG32(mmUVD_VCPU_CACHE_SIZE1, size); offset += size; size = AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles); WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3); WREG32(mmUVD_VCPU_CACHE_SIZE2, size); WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); } /** * uvd_v5_0_start - start UVD block * * @adev: amdgpu_device pointer * * Setup and start the UVD block */ static int uvd_v5_0_start(struct amdgpu_device 
*adev) { struct amdgpu_ring *ring = &adev->uvd.ring; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; uint32_t mp_swap_cntl; int i, j, r; /*disable DPG */ WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2)); /* disable byte swapping */ lmi_swap_cntl = 0; mp_swap_cntl = 0; uvd_v5_0_mc_resume(adev); /* disable interupt */ WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); /* stall UMC and register bus before resetting VCPU */ WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); mdelay(1); /* put LMI, VCPU, RBC etc... into reset */ WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); mdelay(5); /* take UVD block out of reset */ WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); mdelay(5); /* initialize UVD memory controller */ WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | (1 << 21) | (1 << 9) | (1 << 20)); #ifdef __BIG_ENDIAN /* swap (8 in 32) RB and IB */ lmi_swap_cntl = 0xa; mp_swap_cntl = 0; #endif WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl); WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040); WREG32(mmUVD_MPC_SET_MUXA1, 0x0); WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040); WREG32(mmUVD_MPC_SET_MUXB1, 0x0); WREG32(mmUVD_MPC_SET_ALU, 0); WREG32(mmUVD_MPC_SET_MUX, 0x88); /* take all subblocks out of reset, except VCPU */ WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); mdelay(5); /* enable VCPU clock */ WREG32(mmUVD_VCPU_CNTL, 1 << 9); /* enable UMC */ WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); /* boot up the VCPU */ WREG32(mmUVD_SOFT_RESET, 0); mdelay(10); for (i = 0; i < 10; ++i) { uint32_t status; for (j = 0; j < 100; ++j) { status = RREG32(mmUVD_STATUS); if (status & 2) break; mdelay(10); } r = 0; if (status & 2) break; DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); mdelay(10); WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); mdelay(10); r = -1; } if (r) { DRM_ERROR("UVD not responding, giving up!!!\n"); return r; } /* enable master interrupt */ WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1)); /* clear the bit 4 of UVD_STATUS */ WREG32_P(mmUVD_STATUS, 0, ~(2 << 1)); rb_bufsz = order_base_2(ring->ring_size); tmp = 0; tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); /* force RBC into idle state */ WREG32(mmUVD_RBC_RB_CNTL, tmp); /* set the write pointer delay */ WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0); /* set the wb address */ WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2)); /* programm the RB_BASE for ring buffer */ WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr)); /* Initialize the ring buffer's read and write pointers */ WREG32(mmUVD_RBC_RB_RPTR, 0); ring->wptr = RREG32(mmUVD_RBC_RB_RPTR); WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); return 0; } /** 
* uvd_v5_0_stop - stop UVD block * * @adev: amdgpu_device pointer * * stop the UVD block */ static void uvd_v5_0_stop(struct amdgpu_device *adev) { /* force RBC into idle state */ WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); /* Stall UMC and register bus before resetting VCPU */ WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); mdelay(1); /* put VCPU into reset */ WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); mdelay(5); /* disable VCPU clock */ WREG32(mmUVD_VCPU_CNTL, 0x0); /* Unstall UMC and register bus */ WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); WREG32(mmUVD_STATUS, 0); } /** * uvd_v5_0_ring_emit_fence - emit an fence & trap command * * @ring: amdgpu_ring pointer * @fence: fence to emit * * Write a fence and a trap command to the ring. */ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags) { WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); amdgpu_ring_write(ring, seq); amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); amdgpu_ring_write(ring, addr & 0xffffffff); amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff); amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); amdgpu_ring_write(ring, 2); } /** * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush * * @ring: amdgpu_ring pointer * * Emits an hdp flush. */ static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) { amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0)); amdgpu_ring_write(ring, 0); } /** * uvd_v5_0_ring_hdp_invalidate - emit an hdp invalidate * * @ring: amdgpu_ring pointer * * Emits an hdp invalidate. 
*/ static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) { amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0)); amdgpu_ring_write(ring, 1); } /** * uvd_v5_0_ring_test_ring - register write test * * @ring: amdgpu_ring pointer * * Test if we can successfully write to the context register */ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; uint32_t tmp = 0; unsigned i; int r; WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); return r; } amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmUVD_CONTEXT_ID); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); } if (i < adev->usec_timeout) { DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", ring->idx, tmp); r = -EINVAL; } return r; } /** * uvd_v5_0_ring_emit_ib - execute indirect buffer * * @ring: amdgpu_ring pointer * @ib: indirect buffer to execute * * Write ring commands to execute the indirect buffer */ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, unsigned vm_id, bool ctx_switch) { amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0)); amdgpu_ring_write(ring, ib->length_dw); } static bool uvd_v5_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); } static int uvd_v5_0_wait_for_idle(void *handle) { unsigned i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) return 0; } return -ETIMEDOUT; } static int uvd_v5_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; uvd_v5_0_stop(adev); WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); mdelay(5); return uvd_v5_0_start(adev); } static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, enum amdgpu_interrupt_state state) { // TODO return 0; } static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { DRM_DEBUG("IH: UVD TRAP\n"); amdgpu_fence_process(&adev->uvd.ring); return 0; } static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable) { uint32_t data1, data3, suvd_flags; data1 = RREG32(mmUVD_SUVD_CGC_GATE); data3 = RREG32(mmUVD_CGC_GATE); suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK | UVD_SUVD_CGC_GATE__SIT_MASK | UVD_SUVD_CGC_GATE__SMP_MASK | UVD_SUVD_CGC_GATE__SCM_MASK | UVD_SUVD_CGC_GATE__SDB_MASK; if (enable) { data3 |= (UVD_CGC_GATE__SYS_MASK | UVD_CGC_GATE__UDEC_MASK | UVD_CGC_GATE__MPEG2_MASK | UVD_CGC_GATE__RBC_MASK | UVD_CGC_GATE__LMI_MC_MASK | UVD_CGC_GATE__IDCT_MASK | UVD_CGC_GATE__MPRD_MASK | UVD_CGC_GATE__MPC_MASK | UVD_CGC_GATE__LBSI_MASK | UVD_CGC_GATE__LRBBM_MASK | UVD_CGC_GATE__UDEC_RE_MASK | UVD_CGC_GATE__UDEC_CM_MASK | UVD_CGC_GATE__UDEC_IT_MASK | UVD_CGC_GATE__UDEC_DB_MASK | UVD_CGC_GATE__UDEC_MP_MASK | 
UVD_CGC_GATE__WCB_MASK | UVD_CGC_GATE__JPEG_MASK | UVD_CGC_GATE__SCPU_MASK); /* only in pg enabled, we can gate clock to vcpu*/ if (adev->pg_flags & AMD_PG_SUPPORT_UVD) data3 |= UVD_CGC_GATE__VCPU_MASK; data3 &= ~UVD_CGC_GATE__REGS_MASK; data1 |= suvd_flags; } else { data3 = 0; data1 = 0; } WREG32(mmUVD_SUVD_CGC_GATE, data1); WREG32(mmUVD_CGC_GATE, data3); } static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev) { uint32_t data, data2; data = RREG32(mmUVD_CGC_CTRL); data2 = RREG32(mmUVD_SUVD_CGC_CTRL); data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK); data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK | (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) | (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY)); data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | UVD_CGC_CTRL__UDEC_CM_MODE_MASK | UVD_CGC_CTRL__UDEC_IT_MODE_MASK | UVD_CGC_CTRL__UDEC_DB_MODE_MASK | UVD_CGC_CTRL__UDEC_MP_MODE_MASK | UVD_CGC_CTRL__SYS_MODE_MASK | UVD_CGC_CTRL__UDEC_MODE_MASK | UVD_CGC_CTRL__MPEG2_MODE_MASK | UVD_CGC_CTRL__REGS_MODE_MASK | UVD_CGC_CTRL__RBC_MODE_MASK | UVD_CGC_CTRL__LMI_MC_MODE_MASK | UVD_CGC_CTRL__LMI_UMC_MODE_MASK | UVD_CGC_CTRL__IDCT_MODE_MASK | UVD_CGC_CTRL__MPRD_MODE_MASK | UVD_CGC_CTRL__MPC_MODE_MASK | UVD_CGC_CTRL__LBSI_MODE_MASK | UVD_CGC_CTRL__LRBBM_MODE_MASK | UVD_CGC_CTRL__WCB_MODE_MASK | UVD_CGC_CTRL__VCPU_MODE_MASK | UVD_CGC_CTRL__JPEG_MODE_MASK | UVD_CGC_CTRL__SCPU_MODE_MASK); data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK); WREG32(mmUVD_CGC_CTRL, data); WREG32(mmUVD_SUVD_CGC_CTRL, data2); } #if 0 static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev) { uint32_t data, data1, cgc_flags, suvd_flags; data = RREG32(mmUVD_CGC_GATE); data1 = RREG32(mmUVD_SUVD_CGC_GATE); cgc_flags = UVD_CGC_GATE__SYS_MASK | UVD_CGC_GATE__UDEC_MASK | UVD_CGC_GATE__MPEG2_MASK | UVD_CGC_GATE__RBC_MASK | UVD_CGC_GATE__LMI_MC_MASK | UVD_CGC_GATE__IDCT_MASK | UVD_CGC_GATE__MPRD_MASK | UVD_CGC_GATE__MPC_MASK | UVD_CGC_GATE__LBSI_MASK | UVD_CGC_GATE__LRBBM_MASK | UVD_CGC_GATE__UDEC_RE_MASK | UVD_CGC_GATE__UDEC_CM_MASK | UVD_CGC_GATE__UDEC_IT_MASK | UVD_CGC_GATE__UDEC_DB_MASK | UVD_CGC_GATE__UDEC_MP_MASK | UVD_CGC_GATE__WCB_MASK | UVD_CGC_GATE__VCPU_MASK | UVD_CGC_GATE__SCPU_MASK; suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK | UVD_SUVD_CGC_GATE__SIT_MASK | UVD_SUVD_CGC_GATE__SMP_MASK | UVD_SUVD_CGC_GATE__SCM_MASK | UVD_SUVD_CGC_GATE__SDB_MASK; data |= cgc_flags; data1 |= suvd_flags; WREG32(mmUVD_CGC_GATE, data); WREG32(mmUVD_SUVD_CGC_GATE, data1); } #endif static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev, bool enable) { u32 orig, data; if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) { data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL); data |= 0xfff; WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data); orig = data = RREG32(mmUVD_CGC_CTRL); data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; if (orig != data) WREG32(mmUVD_CGC_CTRL, data); } else { data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL); data &= ~0xfff; WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data); orig = data = RREG32(mmUVD_CGC_CTRL); data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; if (orig != data) WREG32(mmUVD_CGC_CTRL, data); } } static int uvd_v5_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; if (enable) { /* wait for STATUS to clear */ if (uvd_v5_0_wait_for_idle(handle)) return -EBUSY; uvd_v5_0_enable_clock_gating(adev, true); /* enable HW gates because UVD is idle */ /* uvd_v5_0_set_hw_clock_gating(adev); */ } else { uvd_v5_0_enable_clock_gating(adev, false); } uvd_v5_0_set_sw_clock_gating(adev); return 0; } static int uvd_v5_0_set_powergating_state(void *handle, enum amd_powergating_state state) { /* This doesn't actually powergate the UVD block. * That's done in the dpm code via the SMC. This * just re-inits the block as necessary. The actual * gating still happens in the dpm code. We should * revisit this when there is a cleaner line between * the smc and the hw blocks */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret = 0; if (state == AMD_PG_STATE_GATE) { uvd_v5_0_stop(adev); } else { ret = uvd_v5_0_start(adev); if (ret) goto out; } out: return ret; } static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; int data; mutex_lock(&adev->pm.mutex); if (RREG32_SMC(ixCURRENT_PG_STATUS) & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { DRM_INFO("Cannot get clockgating state when UVD is powergated.\n"); goto out; } /* AMD_CG_SUPPORT_UVD_MGCG */ data = RREG32(mmUVD_CGC_CTRL); if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK) *flags |= AMD_CG_SUPPORT_UVD_MGCG; out: mutex_unlock(&adev->pm.mutex); } static const struct amd_ip_funcs uvd_v5_0_ip_funcs = { .name = "uvd_v5_0", .early_init = uvd_v5_0_early_init, .late_init = NULL, .sw_init = uvd_v5_0_sw_init, .sw_fini = uvd_v5_0_sw_fini, .hw_init = uvd_v5_0_hw_init, .hw_fini = uvd_v5_0_hw_fini, .suspend = uvd_v5_0_suspend, .resume = uvd_v5_0_resume, .is_idle = uvd_v5_0_is_idle, .wait_for_idle = uvd_v5_0_wait_for_idle, .soft_reset = uvd_v5_0_soft_reset, .set_clockgating_state = uvd_v5_0_set_clockgating_state, .set_powergating_state = uvd_v5_0_set_powergating_state, .get_clockgating_state = uvd_v5_0_get_clockgating_state, }; static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { .type = AMDGPU_RING_TYPE_UVD, .align_mask = 0xf, .nop = PACKET0(mmUVD_NO_OP, 0), .support_64bit_ptrs = false, .get_rptr = uvd_v5_0_ring_get_rptr, .get_wptr = uvd_v5_0_ring_get_wptr, .set_wptr = uvd_v5_0_ring_set_wptr, .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_frame_size = 2 + /* uvd_v5_0_ring_emit_hdp_flush */ 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */ 14, /* uvd_v5_0_ring_emit_fence x1 no user fence */ .emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */ .emit_ib = uvd_v5_0_ring_emit_ib, .emit_fence = uvd_v5_0_ring_emit_fence, .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush, .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate, .test_ring = uvd_v5_0_ring_test_ring, .test_ib = amdgpu_uvd_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .begin_use = amdgpu_uvd_ring_begin_use, .end_use = amdgpu_uvd_ring_end_use, }; static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) { adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs; } static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = { .set = uvd_v5_0_set_interrupt_state, .process = uvd_v5_0_process_interrupt, }; static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev) { adev->uvd.irq.num_types = 1; adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs; } const struct amdgpu_ip_block_version uvd_v5_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_UVD, .major = 5, .minor = 0, .rev = 0, .funcs = &uvd_v5_0_ip_funcs, };
null
null
null
null
99,456
332
null
train_val
1b0d3845b454eaaac0b2064c78926ca4d739a080
262,900
qemu
0
https://github.com/bonzini/qemu
2016-10-18 11:40:27+01:00
#ifndef M68K_TARGET_SYSCALL_H #define M68K_TARGET_SYSCALL_H /* this struct defines the way the registers are stored on the stack during a system call. */ struct target_pt_regs { abi_long d1, d2, d3, d4, d5, d6, d7; abi_long a0, a1, a2, a3, a4, a5, a6; abi_ulong d0; abi_ulong usp; abi_ulong orig_d0; int16_t stkadj; uint16_t sr; abi_ulong pc; uint16_t fntvex; uint16_t __fill; }; #define UNAME_MACHINE "m68k" #define UNAME_MINIMUM_RELEASE "2.6.32" #define TARGET_MINSIGSTKSZ 2048 #define TARGET_MLOCKALL_MCL_CURRENT 1 #define TARGET_MLOCKALL_MCL_FUTURE 2 #define TARGET_WANT_OLD_SYS_SELECT void do_m68k_simcall(CPUM68KState *, int); #endif /* M68K_TARGET_SYSCALL_H */
null
null
null
null
121,024
24,416
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
24,416
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_BROWSER_ACCESSIBILITY_BROWSER_ACCESSIBILITY_COCOA_H_
#define CONTENT_BROWSER_ACCESSIBILITY_BROWSER_ACCESSIBILITY_COCOA_H_

#import <Cocoa/Cocoa.h>

#import "base/mac/scoped_nsobject.h"
#include "base/strings/string16.h"
#include "content/browser/accessibility/browser_accessibility.h"
#include "content/browser/accessibility/browser_accessibility_manager.h"

namespace content {

// Used to store changes in edit fields, required by VoiceOver in order to
// support character echo and other announcements during editing.
struct AXTextEdit {
  AXTextEdit() = default;
  AXTextEdit(base::string16 inserted_text, base::string16 deleted_text)
      : inserted_text(inserted_text), deleted_text(deleted_text) {}

  bool IsEmpty() const { return inserted_text.empty() && deleted_text.empty(); }

  base::string16 inserted_text;
  base::string16 deleted_text;
};

}  // namespace content

// BrowserAccessibilityCocoa is a cocoa wrapper around the BrowserAccessibility
// object. The renderer converts webkit's accessibility tree into a
// WebAccessibility tree and passes it to the browser process over IPC.
// This class converts it into a format Cocoa can query.
@interface BrowserAccessibilityCocoa : NSObject {
 @private
  content::BrowserAccessibility* browserAccessibility_;
  base::scoped_nsobject<NSMutableArray> children_;
  // Stores the previous value of an edit field.
  base::string16 oldValue_;
}

// This creates a cocoa browser accessibility object around
// the cross platform BrowserAccessibility object, which can't be nullptr.
- (instancetype)initWithObject:(content::BrowserAccessibility*)accessibility;

// Clear this object's pointer to the wrapped BrowserAccessibility object
// because the wrapped object has been deleted, but this object may
// persist if the system still has references to it.
- (void)detach;

// Invalidate children for a non-ignored ancestor (including self).
- (void)childrenChanged;

// Convenience method to get the internal, cross-platform role
// from browserAccessibility_.
- (ax::mojom::Role)internalRole;

// Convenience method to get the BrowserAccessibilityDelegate from
// the manager.
- (content::BrowserAccessibilityDelegate*)delegate;

// Get the BrowserAccessibility that this object wraps.
- (content::BrowserAccessibility*)browserAccessibility;

// Computes the text that was added or deleted in a text field after an edit.
- (content::AXTextEdit)computeTextEdit;

// Determines if this object is alive, i.e. it hasn't been detached.
- (BOOL)instanceActive;

// Convert the local object's origin to a global point.
- (NSPoint)pointInScreen:(NSPoint)origin size:(NSSize)size;

// Return the method name for the given attribute. For testing only.
- (NSString*)methodNameForAttribute:(NSString*)attribute;

// Swap the children array with the given scoped_nsobject.
- (void)swapChildren:(base::scoped_nsobject<NSMutableArray>*)other;

- (NSString*)valueForRange:(NSRange)range;
- (NSAttributedString*)attributedValueForRange:(NSRange)range;

- (BOOL)isRowHeaderForCurrentCell:(content::BrowserAccessibility*)header;
- (BOOL)isColumnHeaderForCurrentCell:(content::BrowserAccessibility*)header;

// Internally-used property.
@property(nonatomic, readonly) NSPoint origin; @property(nonatomic, readonly) NSString* accessKey; @property(nonatomic, readonly) NSNumber* ariaAtomic; @property(nonatomic, readonly) NSNumber* ariaBusy; @property(nonatomic, readonly) NSString* ariaLive; @property(nonatomic, readonly) NSNumber* ariaPosInSet; @property(nonatomic, readonly) NSString* ariaRelevant; @property(nonatomic, readonly) NSNumber* ariaSetSize; @property(nonatomic, readonly) NSArray* children; @property(nonatomic, readonly) NSArray* columns; @property(nonatomic, readonly) NSArray* columnHeaders; @property(nonatomic, readonly) NSValue* columnIndexRange; @property(nonatomic, readonly) NSString* description; @property(nonatomic, readonly) NSNumber* disclosing; @property(nonatomic, readonly) id disclosedByRow; @property(nonatomic, readonly) NSNumber* disclosureLevel; @property(nonatomic, readonly) id disclosedRows; @property(nonatomic, readonly) NSString* dropEffects; // Returns the object at the root of the current edit field, if any. @property(nonatomic, readonly) id editableAncestor; @property(nonatomic, readonly) NSNumber* enabled; // Returns a text marker that points to the last character in the document that // can be selected with Voiceover. @property(nonatomic, readonly) id endTextMarker; @property(nonatomic, readonly) NSNumber* expanded; @property(nonatomic, readonly) NSNumber* focused; @property(nonatomic, readonly) NSNumber* grabbed; @property(nonatomic, readonly) id header; @property(nonatomic, readonly) NSString* help; // isIgnored returns whether or not the accessibility object // should be ignored by the accessibility hierarchy. @property(nonatomic, readonly, getter=isIgnored) BOOL ignored; // Index of a row, column, or tree item. @property(nonatomic, readonly) NSNumber* index; @property(nonatomic, readonly) NSNumber* insertionPointLineNumber; @property(nonatomic, readonly) NSString* invalid; @property(nonatomic, readonly) NSNumber* isMultiSelectable; @property(nonatomic, readonly) NSString* placeholderValue; @property(nonatomic, readonly) NSNumber* loaded; @property(nonatomic, readonly) NSNumber* loadingProgress; @property(nonatomic, readonly) NSNumber* maxValue; @property(nonatomic, readonly) NSNumber* minValue; @property(nonatomic, readonly) NSNumber* numberOfCharacters; @property(nonatomic, readonly) NSString* orientation; @property(nonatomic, readonly) id parent; @property(nonatomic, readonly) NSValue* position; @property(nonatomic, readonly) NSNumber* required; // A string indicating the role of this object as far as accessibility // is concerned. @property(nonatomic, readonly) NSString* role; @property(nonatomic, readonly) NSString* roleDescription; @property(nonatomic, readonly) NSArray* rowHeaders; @property(nonatomic, readonly) NSValue* rowIndexRange; @property(nonatomic, readonly) NSArray* rows; // The object is selected as a whole. @property(nonatomic, readonly) NSNumber* selected; @property(nonatomic, readonly) NSArray* selectedChildren; @property(nonatomic, readonly) NSString* selectedText; @property(nonatomic, readonly) NSValue* selectedTextRange; @property(nonatomic, readonly) id selectedTextMarkerRange; @property(nonatomic, readonly) NSValue* size; @property(nonatomic, readonly) NSString* sortDirection; // Returns a text marker that points to the first character in the document that // can be selected with Voiceover. @property(nonatomic, readonly) id startTextMarker; // A string indicating the subrole of this object as far as accessibility // is concerned. 
@property(nonatomic, readonly) NSString* subrole; // The tabs owned by a tablist. @property(nonatomic, readonly) NSArray* tabs; @property(nonatomic, readonly) NSString* title; @property(nonatomic, readonly) id titleUIElement; @property(nonatomic, readonly) NSURL* url; @property(nonatomic, readonly) NSString* value; @property(nonatomic, readonly) NSString* valueDescription; @property(nonatomic, readonly) NSValue* visibleCharacterRange; @property(nonatomic, readonly) NSArray* visibleCells; @property(nonatomic, readonly) NSArray* visibleChildren; @property(nonatomic, readonly) NSArray* visibleColumns; @property(nonatomic, readonly) NSArray* visibleRows; @property(nonatomic, readonly) NSNumber* visited; @property(nonatomic, readonly) id window; @end #endif // CONTENT_BROWSER_ACCESSIBILITY_BROWSER_ACCESSIBILITY_COCOA_H_
null
null
null
null
21,279
2,449
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
155,506
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/* * Common code between the AC-3 encoder and decoder * Copyright (c) 2000 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Common code between the AC-3 encoder and decoder. */ #include "libavutil/common.h" #include "avcodec.h" #include "ac3.h" /** * Starting frequency coefficient bin for each critical band. */ const uint8_t ff_ac3_band_start_tab[AC3_CRITICAL_BANDS+1] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 34, 37, 40, 43, 46, 49, 55, 61, 67, 73, 79, 85, 97, 109, 121, 133, 157, 181, 205, 229, 253 }; /** * Map each frequency coefficient bin to the critical band that contains it. */ const uint8_t ff_ac3_bin_to_band_tab[253] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 28, 28, 29, 29, 29, 30, 30, 30, 31, 31, 31, 32, 32, 32, 33, 33, 33, 34, 34, 34, 35, 35, 35, 35, 35, 35, 36, 36, 36, 36, 36, 36, 37, 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49 }; static inline int calc_lowcomp1(int a, int b0, int b1, int c) { if ((b0 + 256) == b1) { a = c; } else if (b0 > b1) { a = FFMAX(a - 64, 0); } return a; } static inline int calc_lowcomp(int a, int b0, int b1, int bin) { if (bin < 7) { return calc_lowcomp1(a, b0, b1, 384); } else if (bin < 20) { return calc_lowcomp1(a, b0, b1, 320); } else { return FFMAX(a - 128, 0); } } void ff_ac3_bit_alloc_calc_psd(int8_t *exp, int start, int end, int16_t *psd, int16_t *band_psd) { int bin, band; /* exponent mapping to PSD */ for (bin = start; bin < end; bin++) { psd[bin]=(3072 - (exp[bin] << 7)); } /* PSD integration */ bin = start; band = ff_ac3_bin_to_band_tab[start]; do { int v = psd[bin++]; int band_end = FFMIN(ff_ac3_band_start_tab[band+1], end); for (; bin < band_end; bin++) { int max = FFMAX(v, psd[bin]); /* logadd */ int adr = FFMIN(max - ((v + psd[bin] + 1) >> 1), 255); v = max + ff_ac3_log_add_tab[adr]; } band_psd[band++] = v; } while (end > ff_ac3_band_start_tab[band]); } int ff_ac3_bit_alloc_calc_mask(AC3BitAllocParameters *s, int16_t *band_psd, int start, int end, int fast_gain, int is_lfe, int 
dba_mode, int dba_nsegs, uint8_t *dba_offsets, uint8_t *dba_lengths, uint8_t *dba_values, int16_t *mask) { int16_t excite[AC3_CRITICAL_BANDS]; /* excitation */ int band; int band_start, band_end, begin, end1; int lowcomp, fastleak, slowleak; if (end <= 0) return AVERROR_INVALIDDATA; /* excitation function */ band_start = ff_ac3_bin_to_band_tab[start]; band_end = ff_ac3_bin_to_band_tab[end-1] + 1; if (band_start == 0) { lowcomp = 0; lowcomp = calc_lowcomp1(lowcomp, band_psd[0], band_psd[1], 384); excite[0] = band_psd[0] - fast_gain - lowcomp; lowcomp = calc_lowcomp1(lowcomp, band_psd[1], band_psd[2], 384); excite[1] = band_psd[1] - fast_gain - lowcomp; begin = 7; for (band = 2; band < 7; band++) { if (!(is_lfe && band == 6)) lowcomp = calc_lowcomp1(lowcomp, band_psd[band], band_psd[band+1], 384); fastleak = band_psd[band] - fast_gain; slowleak = band_psd[band] - s->slow_gain; excite[band] = fastleak - lowcomp; if (!(is_lfe && band == 6)) { if (band_psd[band] <= band_psd[band+1]) { begin = band + 1; break; } } } end1 = FFMIN(band_end, 22); for (band = begin; band < end1; band++) { if (!(is_lfe && band == 6)) lowcomp = calc_lowcomp(lowcomp, band_psd[band], band_psd[band+1], band); fastleak = FFMAX(fastleak - s->fast_decay, band_psd[band] - fast_gain); slowleak = FFMAX(slowleak - s->slow_decay, band_psd[band] - s->slow_gain); excite[band] = FFMAX(fastleak - lowcomp, slowleak); } begin = 22; } else { /* coupling channel */ begin = band_start; fastleak = (s->cpl_fast_leak << 8) + 768; slowleak = (s->cpl_slow_leak << 8) + 768; } for (band = begin; band < band_end; band++) { fastleak = FFMAX(fastleak - s->fast_decay, band_psd[band] - fast_gain); slowleak = FFMAX(slowleak - s->slow_decay, band_psd[band] - s->slow_gain); excite[band] = FFMAX(fastleak, slowleak); } /* compute masking curve */ for (band = band_start; band < band_end; band++) { int tmp = s->db_per_bit - band_psd[band]; if (tmp > 0) { excite[band] += tmp >> 2; } mask[band] = FFMAX(ff_ac3_hearing_threshold_tab[band >> s->sr_shift][s->sr_code], excite[band]); } /* delta bit allocation */ if (dba_mode == DBA_REUSE || dba_mode == DBA_NEW) { int i, seg, delta; if (dba_nsegs > 8) return -1; band = band_start; for (seg = 0; seg < dba_nsegs; seg++) { band += dba_offsets[seg]; if (band >= AC3_CRITICAL_BANDS || dba_lengths[seg] > AC3_CRITICAL_BANDS-band) return -1; if (dba_values[seg] >= 4) { delta = (dba_values[seg] - 3) * 128; } else { delta = (dba_values[seg] - 4) * 128; } for (i = 0; i < dba_lengths[seg]; i++) { mask[band++] += delta; } } } return 0; }
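/*
 * A minimal standalone sketch (plain C, not FFmpeg API) of the
 * exponent-to-PSD mapping used in ff_ac3_bit_alloc_calc_psd above:
 * psd = 3072 - (exp << 7), so every exponent step lowers the power
 * spectral density estimate by 128 units, from 3072 at exponent 0
 * down to 0 at exponent 24. Compiles with any C compiler.
 */
#include <stdio.h>

int main(void)
{
    for (int exp = 0; exp <= 24; exp += 4)
        printf("exp=%2d -> psd=%4d\n", exp, 3072 - (exp << 7));
    return 0;
}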
null
null
null
null
71,561
39,664
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
39,664
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/* * Copyright (C) 2011 Ericsson AB. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * 3. Neither the name of Ericsson nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_MEDIASTREAM_USER_MEDIA_REQUEST_H_ #define THIRD_PARTY_BLINK_RENDERER_MODULES_MEDIASTREAM_USER_MEDIA_REQUEST_H_ #include "third_party/blink/public/platform/web_media_constraints.h" #include "third_party/blink/public/web/web_user_media_request.h" #include "third_party/blink/renderer/bindings/modules/v8/v8_navigator_user_media_error_callback.h" #include "third_party/blink/renderer/bindings/modules/v8/v8_navigator_user_media_success_callback.h" #include "third_party/blink/renderer/core/dom/pausable_object.h" #include "third_party/blink/renderer/modules/modules_export.h" #include "third_party/blink/renderer/platform/mediastream/media_stream_source.h" #include "third_party/blink/renderer/platform/wtf/forward.h" namespace blink { class Document; class MediaErrorState; class MediaStreamConstraints; class MediaStreamDescriptor; class UserMediaController; class MODULES_EXPORT UserMediaRequest final : public GarbageCollectedFinalized<UserMediaRequest>, public ContextLifecycleObserver { USING_GARBAGE_COLLECTED_MIXIN(UserMediaRequest); public: class Callbacks : public GarbageCollectedFinalized<Callbacks> { public: virtual ~Callbacks() = default; virtual void OnSuccess(ScriptWrappable* callback_this_value, MediaStream*) = 0; virtual void OnError(ScriptWrappable* callback_this_value, DOMExceptionOrOverconstrainedError) = 0; virtual void Trace(blink::Visitor*) {} protected: Callbacks() = default; }; class V8Callbacks; static UserMediaRequest* Create(ExecutionContext*, UserMediaController*, const MediaStreamConstraints& options, Callbacks*, MediaErrorState&); static UserMediaRequest* Create(ExecutionContext*, UserMediaController*, const MediaStreamConstraints& options, V8NavigatorUserMediaSuccessCallback*, V8NavigatorUserMediaErrorCallback*, MediaErrorState&); static UserMediaRequest* CreateForTesting(const WebMediaConstraints& audio, const WebMediaConstraints& video); virtual ~UserMediaRequest(); Document* OwnerDocument(); void Start(); void 
Succeed(MediaStreamDescriptor*); void FailConstraint(const String& constraint_name, const String& message); void Fail(WebUserMediaRequest::Error name, const String& message); bool Audio() const; bool Video() const; WebMediaConstraints AudioConstraints() const; WebMediaConstraints VideoConstraints() const; // Flag tied to whether or not the similarly named Origin Trial is // enabled. Will be removed at end of trial. See: http://crbug.com/789152. bool ShouldDisableHardwareNoiseSuppression() const; bool ShouldEnableExperimentalHardwareEchoCancellation() const; // errorMessage is only set if requestIsPrivilegedContext() returns |false|. // Caller is responsible for properly setting errors and canceling request. bool IsSecureContextUse(String& error_message); // ContextLifecycleObserver void ContextDestroyed(ExecutionContext*) override; virtual void Trace(blink::Visitor*); private: UserMediaRequest(ExecutionContext*, UserMediaController*, WebMediaConstraints audio, WebMediaConstraints video, Callbacks*); WebMediaConstraints audio_; WebMediaConstraints video_; bool should_disable_hardware_noise_suppression_; bool should_enable_experimental_hw_echo_cancellation_; Member<UserMediaController> controller_; Member<Callbacks> callbacks_; }; } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_MODULES_MEDIASTREAM_USER_MEDIA_REQUEST_H_
null
null
null
null
36,527
3,078
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
156,135
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/*
 * Copyright (c) 2008 Jaikrishnan Menon <realityman@gmx.net>
 * Copyright (c) 2010 Peter Ross <pross@xvid.org>
 * Copyright (c) 2010 Sebastian Vater <cdgs.basty@googlemail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * IFF file demuxer
 * by Jaikrishnan Menon
 * for more information on the .iff file format, visit:
 * http://wiki.multimedia.cx/index.php?title=IFF
 */

#include <inttypes.h>

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "id3v2.h"
#include "internal.h"

#define ID_8SVX       MKTAG('8','S','V','X')
#define ID_16SV       MKTAG('1','6','S','V')
#define ID_MAUD       MKTAG('M','A','U','D')
#define ID_MHDR       MKTAG('M','H','D','R')
#define ID_MDAT       MKTAG('M','D','A','T')
#define ID_VHDR       MKTAG('V','H','D','R')
#define ID_ATAK       MKTAG('A','T','A','K')
#define ID_RLSE       MKTAG('R','L','S','E')
#define ID_CHAN       MKTAG('C','H','A','N')
#define ID_PBM        MKTAG('P','B','M',' ')
#define ID_ILBM       MKTAG('I','L','B','M')
#define ID_BMHD       MKTAG('B','M','H','D')
#define ID_DGBL       MKTAG('D','G','B','L')
#define ID_CAMG       MKTAG('C','A','M','G')
#define ID_CMAP       MKTAG('C','M','A','P')
#define ID_ACBM       MKTAG('A','C','B','M')
#define ID_DEEP       MKTAG('D','E','E','P')
#define ID_RGB8       MKTAG('R','G','B','8')
#define ID_RGBN       MKTAG('R','G','B','N')
#define ID_DSD        MKTAG('D','S','D',' ')
#define ID_DST        MKTAG('D','S','T',' ')
#define ID_DSTC       MKTAG('D','S','T','C')
#define ID_DSTF       MKTAG('D','S','T','F')
#define ID_FRTE       MKTAG('F','R','T','E')
#define ID_ANIM       MKTAG('A','N','I','M')
#define ID_ANHD       MKTAG('A','N','H','D')
#define ID_DLTA       MKTAG('D','L','T','A')
#define ID_DPAN       MKTAG('D','P','A','N')

#define ID_FORM       MKTAG('F','O','R','M')
#define ID_FRM8       MKTAG('F','R','M','8')
#define ID_ANNO       MKTAG('A','N','N','O')
#define ID_AUTH       MKTAG('A','U','T','H')
#define ID_CHRS       MKTAG('C','H','R','S')
#define ID_COPYRIGHT  MKTAG('(','c',')',' ')
#define ID_CSET       MKTAG('C','S','E','T')
#define ID_FVER       MKTAG('F','V','E','R')
#define ID_NAME       MKTAG('N','A','M','E')
#define ID_TEXT       MKTAG('T','E','X','T')
#define ID_ABIT       MKTAG('A','B','I','T')
#define ID_BODY       MKTAG('B','O','D','Y')
#define ID_DBOD       MKTAG('D','B','O','D')
#define ID_DPEL       MKTAG('D','P','E','L')
#define ID_DLOC       MKTAG('D','L','O','C')
#define ID_TVDC       MKTAG('T','V','D','C')

#define LEFT    2
#define RIGHT   4
#define STEREO  6

/**
 * This number of bytes is added at the beginning of each AVPacket
 * and contains additional information about video properties
 * which has to be shared between demuxer and decoder.
 * This number may change between frames, e.g. the demuxer might
 * set it to the smallest possible size of 2 to indicate that there's
 * no extradata changing in this frame.
*/ #define IFF_EXTRA_VIDEO_SIZE 41 typedef enum { COMP_NONE, COMP_FIB, COMP_EXP } svx8_compression_type; typedef struct IffDemuxContext { int is_64bit; ///< chunk size is 64-bit int64_t body_pos; int64_t body_end; uint32_t body_size; svx8_compression_type svx8_compression; unsigned maud_bits; unsigned maud_compression; unsigned bitmap_compression; ///< delta compression method used unsigned bpp; ///< bits per plane to decode (differs from bits_per_coded_sample if HAM) unsigned ham; ///< 0 if non-HAM or number of hold bits (6 for bpp > 6, 4 otherwise) unsigned flags; ///< 1 for EHB, 0 is no extra half darkening unsigned transparency; ///< transparency color index in palette unsigned masking; ///< masking method used uint8_t tvdc[32]; ///< TVDC lookup table int64_t pts; } IffDemuxContext; /* Metadata string read */ static int get_metadata(AVFormatContext *s, const char *const tag, const unsigned data_size) { uint8_t *buf = ((data_size + 1) == 0) ? NULL : av_malloc(data_size + 1); if (!buf) return AVERROR(ENOMEM); if (avio_read(s->pb, buf, data_size) != data_size) { av_free(buf); return AVERROR(EIO); } buf[data_size] = 0; av_dict_set(&s->metadata, tag, buf, AV_DICT_DONT_STRDUP_VAL); return 0; } static int iff_probe(AVProbeData *p) { const uint8_t *d = p->buf; if ( (AV_RL32(d) == ID_FORM && (AV_RL32(d+8) == ID_8SVX || AV_RL32(d+8) == ID_16SV || AV_RL32(d+8) == ID_MAUD || AV_RL32(d+8) == ID_PBM || AV_RL32(d+8) == ID_ACBM || AV_RL32(d+8) == ID_DEEP || AV_RL32(d+8) == ID_ILBM || AV_RL32(d+8) == ID_RGB8 || AV_RL32(d+8) == ID_ANIM || AV_RL32(d+8) == ID_RGBN)) || (AV_RL32(d) == ID_FRM8 && AV_RL32(d+12) == ID_DSD)) return AVPROBE_SCORE_MAX; return 0; } static const AVCodecTag dsd_codec_tags[] = { { AV_CODEC_ID_DSD_MSBF, ID_DSD }, { AV_CODEC_ID_DST, ID_DST }, { AV_CODEC_ID_NONE, 0 }, }; #define DSD_SLFT MKTAG('S','L','F','T') #define DSD_SRGT MKTAG('S','R','G','T') #define DSD_MLFT MKTAG('M','L','F','T') #define DSD_MRGT MKTAG('M','R','G','T') #define DSD_C MKTAG('C',' ',' ',' ') #define DSD_LS MKTAG('L','S',' ',' ') #define DSD_RS MKTAG('R','S',' ',' ') #define DSD_LFE MKTAG('L','F','E',' ') static const uint32_t dsd_stereo[] = { DSD_SLFT, DSD_SRGT }; static const uint32_t dsd_5point0[] = { DSD_MLFT, DSD_MRGT, DSD_C, DSD_LS, DSD_RS }; static const uint32_t dsd_5point1[] = { DSD_MLFT, DSD_MRGT, DSD_C, DSD_LFE, DSD_LS, DSD_RS }; typedef struct { uint64_t layout; const uint32_t * dsd_layout; } DSDLayoutDesc; static const DSDLayoutDesc dsd_channel_layout[] = { { AV_CH_LAYOUT_STEREO, dsd_stereo }, { AV_CH_LAYOUT_5POINT0, dsd_5point0 }, { AV_CH_LAYOUT_5POINT1, dsd_5point1 }, }; static const uint64_t dsd_loudspeaker_config[] = { AV_CH_LAYOUT_STEREO, 0, 0, AV_CH_LAYOUT_5POINT0, AV_CH_LAYOUT_5POINT1, }; static const char * dsd_source_comment[] = { "dsd_source_comment", "analogue_source_comment", "pcm_source_comment", }; static const char * dsd_history_comment[] = { "general_remark", "operator_name", "creating_machine", "timezone", "file_revision" }; static int parse_dsd_diin(AVFormatContext *s, AVStream *st, uint64_t eof) { AVIOContext *pb = s->pb; while (avio_tell(pb) + 12 <= eof && !avio_feof(pb)) { uint32_t tag = avio_rl32(pb); uint64_t size = avio_rb64(pb); uint64_t orig_pos = avio_tell(pb); const char * metadata_tag = NULL; switch(tag) { case MKTAG('D','I','A','R'): metadata_tag = "artist"; break; case MKTAG('D','I','T','I'): metadata_tag = "title"; break; } if (metadata_tag && size > 4) { unsigned int tag_size = avio_rb32(pb); int ret = get_metadata(s, metadata_tag, FFMIN(tag_size, size - 4)); if (ret < 
0) { av_log(s, AV_LOG_ERROR, "cannot allocate metadata tag %s!\n", metadata_tag); return ret; } } avio_skip(pb, size - (avio_tell(pb) - orig_pos) + (size & 1)); } return 0; } static int parse_dsd_prop(AVFormatContext *s, AVStream *st, uint64_t eof) { AVIOContext *pb = s->pb; char abss[24]; int hour, min, sec, i, ret, config; int dsd_layout[6]; ID3v2ExtraMeta *id3v2_extra_meta; while (avio_tell(pb) + 12 <= eof && !avio_feof(pb)) { uint32_t tag = avio_rl32(pb); uint64_t size = avio_rb64(pb); uint64_t orig_pos = avio_tell(pb); switch(tag) { case MKTAG('A','B','S','S'): if (size < 8) return AVERROR_INVALIDDATA; hour = avio_rb16(pb); min = avio_r8(pb); sec = avio_r8(pb); snprintf(abss, sizeof(abss), "%02dh:%02dm:%02ds:%d", hour, min, sec, avio_rb32(pb)); av_dict_set(&st->metadata, "absolute_start_time", abss, 0); break; case MKTAG('C','H','N','L'): if (size < 2) return AVERROR_INVALIDDATA; st->codecpar->channels = avio_rb16(pb); if (size < 2 + st->codecpar->channels * 4) return AVERROR_INVALIDDATA; st->codecpar->channel_layout = 0; if (st->codecpar->channels > FF_ARRAY_ELEMS(dsd_layout)) { avpriv_request_sample(s, "channel layout"); break; } for (i = 0; i < st->codecpar->channels; i++) dsd_layout[i] = avio_rl32(pb); for (i = 0; i < FF_ARRAY_ELEMS(dsd_channel_layout); i++) { const DSDLayoutDesc * d = &dsd_channel_layout[i]; if (av_get_channel_layout_nb_channels(d->layout) == st->codecpar->channels && !memcmp(d->dsd_layout, dsd_layout, st->codecpar->channels * sizeof(uint32_t))) { st->codecpar->channel_layout = d->layout; break; } } break; case MKTAG('C','M','P','R'): if (size < 4) return AVERROR_INVALIDDATA; st->codecpar->codec_tag = tag = avio_rl32(pb); st->codecpar->codec_id = ff_codec_get_id(dsd_codec_tags, tag); if (!st->codecpar->codec_id) { av_log(s, AV_LOG_ERROR, "'%s' compression is not supported\n", av_fourcc2str(tag)); return AVERROR_PATCHWELCOME; } break; case MKTAG('F','S',' ',' '): if (size < 4) return AVERROR_INVALIDDATA; st->codecpar->sample_rate = avio_rb32(pb) / 8; break; case MKTAG('I','D','3',' '): id3v2_extra_meta = NULL; ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta, size); if (id3v2_extra_meta) { if ((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0 || (ret = ff_id3v2_parse_chapters(s, &id3v2_extra_meta)) < 0) { ff_id3v2_free_extra_meta(&id3v2_extra_meta); return ret; } ff_id3v2_free_extra_meta(&id3v2_extra_meta); } if (size < avio_tell(pb) - orig_pos) { av_log(s, AV_LOG_ERROR, "id3 exceeds chunk size\n"); return AVERROR_INVALIDDATA; } break; case MKTAG('L','S','C','O'): if (size < 2) return AVERROR_INVALIDDATA; config = avio_rb16(pb); if (config != 0xFFFF) { if (config < FF_ARRAY_ELEMS(dsd_loudspeaker_config)) st->codecpar->channel_layout = dsd_loudspeaker_config[config]; if (!st->codecpar->channel_layout) avpriv_request_sample(s, "loudspeaker configuration %d", config); } break; } avio_skip(pb, size - (avio_tell(pb) - orig_pos) + (size & 1)); } return 0; } static int read_dst_frame(AVFormatContext *s, AVPacket *pkt) { IffDemuxContext *iff = s->priv_data; AVIOContext *pb = s->pb; uint32_t chunk_id; uint64_t chunk_pos, data_pos, data_size; int ret = AVERROR_EOF; while (!avio_feof(pb)) { chunk_pos = avio_tell(pb); if (chunk_pos >= iff->body_end) return AVERROR_EOF; chunk_id = avio_rl32(pb); data_size = iff->is_64bit ? avio_rb64(pb) : avio_rb32(pb); data_pos = avio_tell(pb); if (data_size < 1) return AVERROR_INVALIDDATA; switch (chunk_id) { case ID_DSTF: if (!pkt) { iff->body_pos = avio_tell(pb) - (iff->is_64bit ? 
12 : 8); iff->body_size = iff->body_end - iff->body_pos; return 0; } ret = av_get_packet(pb, pkt, data_size); if (ret < 0) return ret; if (data_size & 1) avio_skip(pb, 1); pkt->flags |= AV_PKT_FLAG_KEY; pkt->stream_index = 0; pkt->duration = 588 * s->streams[0]->codecpar->sample_rate / 44100; pkt->pos = chunk_pos; chunk_pos = avio_tell(pb); if (chunk_pos >= iff->body_end) return 0; avio_seek(pb, chunk_pos, SEEK_SET); return 0; case ID_FRTE: if (data_size < 4) return AVERROR_INVALIDDATA; s->streams[0]->duration = avio_rb32(pb) * 588LL * s->streams[0]->codecpar->sample_rate / 44100; break; } avio_skip(pb, data_size - (avio_tell(pb) - data_pos) + (data_size & 1)); } return ret; } static const uint8_t deep_rgb24[] = {0, 0, 0, 3, 0, 1, 0, 8, 0, 2, 0, 8, 0, 3, 0, 8}; static const uint8_t deep_rgba[] = {0, 0, 0, 4, 0, 1, 0, 8, 0, 2, 0, 8, 0, 3, 0, 8}; static const uint8_t deep_bgra[] = {0, 0, 0, 4, 0, 3, 0, 8, 0, 2, 0, 8, 0, 1, 0, 8}; static const uint8_t deep_argb[] = {0, 0, 0, 4, 0,17, 0, 8, 0, 1, 0, 8, 0, 2, 0, 8}; static const uint8_t deep_abgr[] = {0, 0, 0, 4, 0,17, 0, 8, 0, 3, 0, 8, 0, 2, 0, 8}; static int iff_read_header(AVFormatContext *s) { IffDemuxContext *iff = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; uint8_t *buf; uint32_t chunk_id; uint64_t data_size; uint32_t screenmode = 0, num, den; unsigned transparency = 0; unsigned masking = 0; // no mask uint8_t fmt[16]; int fmt_size; st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); st->codecpar->channels = 1; st->codecpar->channel_layout = AV_CH_LAYOUT_MONO; iff->is_64bit = avio_rl32(pb) == ID_FRM8; avio_skip(pb, iff->is_64bit ? 8 : 4); // codec_tag used by ByteRun1 decoder to distinguish progressive (PBM) and interlaced (ILBM) content st->codecpar->codec_tag = avio_rl32(pb); if (st->codecpar->codec_tag == ID_ANIM) { avio_skip(pb, 12); } iff->bitmap_compression = -1; iff->svx8_compression = -1; iff->maud_bits = -1; iff->maud_compression = -1; while(!avio_feof(pb)) { uint64_t orig_pos; int res; const char *metadata_tag = NULL; int version, nb_comments, i; chunk_id = avio_rl32(pb); data_size = iff->is_64bit ? 
avio_rb64(pb) : avio_rb32(pb); orig_pos = avio_tell(pb); switch(chunk_id) { case ID_VHDR: st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; if (data_size < 14) return AVERROR_INVALIDDATA; avio_skip(pb, 12); st->codecpar->sample_rate = avio_rb16(pb); if (data_size >= 16) { avio_skip(pb, 1); iff->svx8_compression = avio_r8(pb); } break; case ID_MHDR: st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; if (data_size < 32) return AVERROR_INVALIDDATA; avio_skip(pb, 4); iff->maud_bits = avio_rb16(pb); avio_skip(pb, 2); num = avio_rb32(pb); den = avio_rb16(pb); if (!den) return AVERROR_INVALIDDATA; avio_skip(pb, 2); st->codecpar->sample_rate = num / den; st->codecpar->channels = avio_rb16(pb); iff->maud_compression = avio_rb16(pb); if (st->codecpar->channels == 1) st->codecpar->channel_layout = AV_CH_LAYOUT_MONO; else if (st->codecpar->channels == 2) st->codecpar->channel_layout = AV_CH_LAYOUT_STEREO; break; case ID_ABIT: case ID_BODY: case ID_DBOD: case ID_DSD: case ID_DST: case ID_MDAT: iff->body_pos = avio_tell(pb); iff->body_end = iff->body_pos + data_size; iff->body_size = data_size; if (chunk_id == ID_DST) { int ret = read_dst_frame(s, NULL); if (ret < 0) return ret; } break; case ID_CHAN: if (data_size < 4) return AVERROR_INVALIDDATA; if (avio_rb32(pb) < 6) { st->codecpar->channels = 1; st->codecpar->channel_layout = AV_CH_LAYOUT_MONO; } else { st->codecpar->channels = 2; st->codecpar->channel_layout = AV_CH_LAYOUT_STEREO; } break; case ID_CAMG: if (data_size < 4) return AVERROR_INVALIDDATA; screenmode = avio_rb32(pb); break; case ID_CMAP: if (data_size < 3 || data_size > 768 || data_size % 3) { av_log(s, AV_LOG_ERROR, "Invalid CMAP chunk size %"PRIu64"\n", data_size); return AVERROR_INVALIDDATA; } st->codecpar->extradata_size = data_size + IFF_EXTRA_VIDEO_SIZE; st->codecpar->extradata = av_malloc(data_size + IFF_EXTRA_VIDEO_SIZE + AV_INPUT_BUFFER_PADDING_SIZE); if (!st->codecpar->extradata) return AVERROR(ENOMEM); if (avio_read(pb, st->codecpar->extradata + IFF_EXTRA_VIDEO_SIZE, data_size) < 0) return AVERROR(EIO); break; case ID_BMHD: st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; if (data_size <= 8) return AVERROR_INVALIDDATA; st->codecpar->width = avio_rb16(pb); st->codecpar->height = avio_rb16(pb); avio_skip(pb, 4); // x, y offset st->codecpar->bits_per_coded_sample = avio_r8(pb); if (data_size >= 10) masking = avio_r8(pb); if (data_size >= 11) iff->bitmap_compression = avio_r8(pb); if (data_size >= 14) { avio_skip(pb, 1); // padding transparency = avio_rb16(pb); } if (data_size >= 16) { st->sample_aspect_ratio.num = avio_r8(pb); st->sample_aspect_ratio.den = avio_r8(pb); } break; case ID_ANHD: break; case ID_DPAN: avio_skip(pb, 2); st->duration = avio_rb16(pb); break; case ID_DPEL: if (data_size < 4 || (data_size & 3)) return AVERROR_INVALIDDATA; if ((fmt_size = avio_read(pb, fmt, sizeof(fmt))) < 0) return fmt_size; if (fmt_size == sizeof(deep_rgb24) && !memcmp(fmt, deep_rgb24, sizeof(deep_rgb24))) st->codecpar->format = AV_PIX_FMT_RGB24; else if (fmt_size == sizeof(deep_rgba) && !memcmp(fmt, deep_rgba, sizeof(deep_rgba))) st->codecpar->format = AV_PIX_FMT_RGBA; else if (fmt_size == sizeof(deep_bgra) && !memcmp(fmt, deep_bgra, sizeof(deep_bgra))) st->codecpar->format = AV_PIX_FMT_BGRA; else if (fmt_size == sizeof(deep_argb) && !memcmp(fmt, deep_argb, sizeof(deep_argb))) st->codecpar->format = AV_PIX_FMT_ARGB; else if (fmt_size == sizeof(deep_abgr) && !memcmp(fmt, deep_abgr, sizeof(deep_abgr))) st->codecpar->format = AV_PIX_FMT_ABGR; else { avpriv_request_sample(s, "color format %.16s", fmt); 
return AVERROR_PATCHWELCOME; } break; case ID_DGBL: st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; if (data_size < 8) return AVERROR_INVALIDDATA; st->codecpar->width = avio_rb16(pb); st->codecpar->height = avio_rb16(pb); iff->bitmap_compression = avio_rb16(pb); st->sample_aspect_ratio.num = avio_r8(pb); st->sample_aspect_ratio.den = avio_r8(pb); st->codecpar->bits_per_coded_sample = 24; break; case ID_DLOC: if (data_size < 4) return AVERROR_INVALIDDATA; st->codecpar->width = avio_rb16(pb); st->codecpar->height = avio_rb16(pb); break; case ID_TVDC: if (data_size < sizeof(iff->tvdc)) return AVERROR_INVALIDDATA; res = avio_read(pb, iff->tvdc, sizeof(iff->tvdc)); if (res < 0) return res; break; case ID_ANNO: case ID_TEXT: metadata_tag = "comment"; break; case ID_AUTH: metadata_tag = "artist"; break; case ID_COPYRIGHT: metadata_tag = "copyright"; break; case ID_NAME: metadata_tag = "title"; break; /* DSD tags */ case MKTAG('F','V','E','R'): if (data_size < 4) return AVERROR_INVALIDDATA; version = avio_rb32(pb); av_log(s, AV_LOG_DEBUG, "DSIFF v%d.%d.%d.%d\n",version >> 24, (version >> 16) & 0xFF, (version >> 8) & 0xFF, version & 0xFF); st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; break; case MKTAG('D','I','I','N'): res = parse_dsd_diin(s, st, orig_pos + data_size); if (res < 0) return res; break; case MKTAG('P','R','O','P'): if (data_size < 4) return AVERROR_INVALIDDATA; if (avio_rl32(pb) != MKTAG('S','N','D',' ')) { avpriv_request_sample(s, "unknown property type"); break; } res = parse_dsd_prop(s, st, orig_pos + data_size); if (res < 0) return res; break; case MKTAG('C','O','M','T'): if (data_size < 2) return AVERROR_INVALIDDATA; nb_comments = avio_rb16(pb); for (i = 0; i < nb_comments; i++) { int year, mon, day, hour, min, type, ref; char tmp[24]; const char *tag; int metadata_size; year = avio_rb16(pb); mon = avio_r8(pb); day = avio_r8(pb); hour = avio_r8(pb); min = avio_r8(pb); snprintf(tmp, sizeof(tmp), "%04d-%02d-%02d %02d:%02d", year, mon, day, hour, min); av_dict_set(&st->metadata, "comment_time", tmp, 0); type = avio_rb16(pb); ref = avio_rb16(pb); switch (type) { case 1: if (!i) tag = "channel_comment"; else { snprintf(tmp, sizeof(tmp), "channel%d_comment", ref); tag = tmp; } break; case 2: tag = ref < FF_ARRAY_ELEMS(dsd_source_comment) ? dsd_source_comment[ref] : "source_comment"; break; case 3: tag = ref < FF_ARRAY_ELEMS(dsd_history_comment) ? 
dsd_history_comment[ref] : "file_history"; break; default: tag = "comment"; } metadata_size = avio_rb32(pb); if ((res = get_metadata(s, tag, metadata_size)) < 0) { av_log(s, AV_LOG_ERROR, "cannot allocate metadata tag %s!\n", tag); return res; } if (metadata_size & 1) avio_skip(pb, 1); } break; } if (metadata_tag) { if ((res = get_metadata(s, metadata_tag, data_size)) < 0) { av_log(s, AV_LOG_ERROR, "cannot allocate metadata tag %s!\n", metadata_tag); return res; } } avio_skip(pb, data_size - (avio_tell(pb) - orig_pos) + (data_size & 1)); } if (st->codecpar->codec_tag == ID_ANIM) avio_seek(pb, 12, SEEK_SET); else avio_seek(pb, iff->body_pos, SEEK_SET); switch(st->codecpar->codec_type) { case AVMEDIA_TYPE_AUDIO: avpriv_set_pts_info(st, 32, 1, st->codecpar->sample_rate); if (st->codecpar->codec_tag == ID_16SV) st->codecpar->codec_id = AV_CODEC_ID_PCM_S16BE_PLANAR; else if (st->codecpar->codec_tag == ID_MAUD) { if (iff->maud_bits == 8 && !iff->maud_compression) { st->codecpar->codec_id = AV_CODEC_ID_PCM_U8; } else if (iff->maud_bits == 16 && !iff->maud_compression) { st->codecpar->codec_id = AV_CODEC_ID_PCM_S16BE; } else if (iff->maud_bits == 8 && iff->maud_compression == 2) { st->codecpar->codec_id = AV_CODEC_ID_PCM_ALAW; } else if (iff->maud_bits == 8 && iff->maud_compression == 3) { st->codecpar->codec_id = AV_CODEC_ID_PCM_MULAW; } else { avpriv_request_sample(s, "compression %d and bit depth %d", iff->maud_compression, iff->maud_bits); return AVERROR_PATCHWELCOME; } } else if (st->codecpar->codec_tag != ID_DSD && st->codecpar->codec_tag != ID_DST) { switch (iff->svx8_compression) { case COMP_NONE: st->codecpar->codec_id = AV_CODEC_ID_PCM_S8_PLANAR; break; case COMP_FIB: st->codecpar->codec_id = AV_CODEC_ID_8SVX_FIB; break; case COMP_EXP: st->codecpar->codec_id = AV_CODEC_ID_8SVX_EXP; break; default: av_log(s, AV_LOG_ERROR, "Unknown SVX8 compression method '%d'\n", iff->svx8_compression); return -1; } } st->codecpar->bits_per_coded_sample = av_get_bits_per_sample(st->codecpar->codec_id); st->codecpar->bit_rate = (int64_t)st->codecpar->channels * st->codecpar->sample_rate * st->codecpar->bits_per_coded_sample; st->codecpar->block_align = st->codecpar->channels * st->codecpar->bits_per_coded_sample; if (st->codecpar->codec_tag == ID_DSD && st->codecpar->block_align <= 0) return AVERROR_INVALIDDATA; break; case AVMEDIA_TYPE_VIDEO: iff->bpp = st->codecpar->bits_per_coded_sample; if (st->codecpar->codec_tag == ID_ANIM) avpriv_set_pts_info(st, 32, 1, 60); if ((screenmode & 0x800 /* Hold And Modify */) && iff->bpp <= 8) { iff->ham = iff->bpp > 6 ? 
6 : 4;
            st->codecpar->bits_per_coded_sample = 24;
        }
        iff->flags  = (screenmode & 0x80 /* Extra HalfBrite */) && iff->bpp <= 8;
        iff->masking = masking;
        iff->transparency = transparency;

        if (!st->codecpar->extradata) {
            st->codecpar->extradata_size = IFF_EXTRA_VIDEO_SIZE;
            st->codecpar->extradata      = av_malloc(IFF_EXTRA_VIDEO_SIZE + AV_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codecpar->extradata)
                return AVERROR(ENOMEM);
        }
        av_assert0(st->codecpar->extradata_size >= IFF_EXTRA_VIDEO_SIZE);
        buf = st->codecpar->extradata;
        bytestream_put_be16(&buf, IFF_EXTRA_VIDEO_SIZE);
        bytestream_put_byte(&buf, iff->bitmap_compression);
        bytestream_put_byte(&buf, iff->bpp);
        bytestream_put_byte(&buf, iff->ham);
        bytestream_put_byte(&buf, iff->flags);
        bytestream_put_be16(&buf, iff->transparency);
        bytestream_put_byte(&buf, iff->masking);
        bytestream_put_buffer(&buf, iff->tvdc, sizeof(iff->tvdc));
        st->codecpar->codec_id = AV_CODEC_ID_IFF_ILBM;
        break;
    default:
        return -1;
    }

    return 0;
}

static unsigned get_anim_duration(uint8_t *buf, int size)
{
    GetByteContext gb;

    bytestream2_init(&gb, buf, size);
    bytestream2_skip(&gb, 4);
    while (bytestream2_get_bytes_left(&gb) > 8) {
        unsigned chunk = bytestream2_get_le32(&gb);
        unsigned size = bytestream2_get_be32(&gb);

        if (chunk == ID_ANHD) {
            if (size < 40)
                break;
            bytestream2_skip(&gb, 14);
            return bytestream2_get_be32(&gb);
        } else {
            /* chunks are padded to an even length */
            bytestream2_skip(&gb, size + (size & 1));
        }
    }
    return 10;
}

static int iff_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    IffDemuxContext *iff = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st = s->streams[0];
    int ret;
    int64_t pos = avio_tell(pb);

    if (avio_feof(pb))
        return AVERROR_EOF;
    if (st->codecpar->codec_tag != ID_ANIM && pos >= iff->body_end)
        return AVERROR_EOF;

    if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
        if (st->codecpar->codec_tag == ID_DSD || st->codecpar->codec_tag == ID_MAUD) {
            ret = av_get_packet(pb, pkt, FFMIN(iff->body_end - pos, 1024 * st->codecpar->block_align));
        } else if (st->codecpar->codec_tag == ID_DST) {
            return read_dst_frame(s, pkt);
        } else {
            if (iff->body_size > INT_MAX)
                return AVERROR_INVALIDDATA;
            ret = av_get_packet(pb, pkt, iff->body_size);
        }
    } else if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && st->codecpar->codec_tag == ID_ANIM) {
        uint64_t data_size, orig_pos;
        uint32_t chunk_id, chunk_id2;

        while (!avio_feof(pb)) {
            if (avio_feof(pb))
                return AVERROR_EOF;

            orig_pos  = avio_tell(pb);
            chunk_id  = avio_rl32(pb);
            data_size = avio_rb32(pb);
            chunk_id2 = avio_rl32(pb);

            if (chunk_id == ID_FORM && chunk_id2 == ID_ILBM) {
                avio_skip(pb, -4);
                break;
            } else if (chunk_id == ID_FORM && chunk_id2 == ID_ANIM) {
                continue;
            } else {
                avio_skip(pb, data_size);
            }
        }

        ret = av_get_packet(pb, pkt, data_size);
        pkt->pos = orig_pos;
        pkt->duration = get_anim_duration(pkt->data, pkt->size);
        if (pos == 12)
            pkt->flags |= AV_PKT_FLAG_KEY;
    } else if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && st->codecpar->codec_tag != ID_ANIM) {
        ret = av_get_packet(pb, pkt, iff->body_size);
        pkt->pos = pos;
        if (pos == iff->body_pos)
            pkt->flags |= AV_PKT_FLAG_KEY;
    } else {
        av_assert0(0);
    }

    if (ret < 0)
        return ret;
    pkt->stream_index = 0;
    return ret;
}

AVInputFormat ff_iff_demuxer = {
    .name           = "iff",
    .long_name      = NULL_IF_CONFIG_SMALL("IFF (Interchange File Format)"),
    .priv_data_size = sizeof(IffDemuxContext),
    .read_probe     = iff_probe,
    .read_header    = iff_read_header,
    .read_packet    = iff_read_packet,
    .flags          = AVFMT_GENERIC_INDEX | AVFMT_NO_BYTE_SEEK,
};
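/*
 * A minimal standalone sketch (plain C, not FFmpeg API) of the EA-IFF 85
 * chunk walk this demuxer performs throughout the code above: each chunk is
 * a 4-byte ID, a 32-bit big-endian size, then the payload padded to an even
 * length, hence the recurring "size + (size & 1)" advance. Helper names are
 * illustrative only.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t rb32(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
}

static void walk_chunks(const uint8_t *buf, size_t len)
{
    size_t pos = 0;
    while (pos + 8 <= len) {
        uint32_t size = rb32(buf + pos + 4);
        printf("chunk %.4s, %u byte(s)\n", (const char *)(buf + pos), size);
        pos += 8 + size + (size & 1); /* skip header, payload and pad byte */
    }
}

int main(void)
{
    /* two chunks: "ABCD" with a 1-byte payload (padded), "WXYZ" with 2 */
    static const uint8_t buf[] = {
        'A','B','C','D', 0,0,0,1, 0xAA, 0x00,
        'W','X','Y','Z', 0,0,0,2, 0xBB, 0xCC,
    };
    walk_chunks(buf, sizeof(buf));
    return 0;
}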
null
null
null
null
72,190
44,475
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
44,475
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef PPAPI_HOST_INSTANCE_MESSAGE_FILTER_H_ #define PPAPI_HOST_INSTANCE_MESSAGE_FILTER_H_ #include "base/macros.h" #include "ppapi/host/ppapi_host_export.h" namespace IPC { class Message; } namespace ppapi { namespace host { class PpapiHost; class PPAPI_HOST_EXPORT InstanceMessageFilter { public: explicit InstanceMessageFilter(PpapiHost* host); virtual ~InstanceMessageFilter(); // Processes an instance message from the plugin process. Returns true if the // message was handled. On false, the PpapiHost will forward the message to // the next filter. virtual bool OnInstanceMessageReceived(const IPC::Message& msg) = 0; PpapiHost* host() { return host_; } private: PpapiHost* host_; DISALLOW_COPY_AND_ASSIGN(InstanceMessageFilter); }; } // namespace host } // namespace ppapi #endif // PPAPI_HOST_INSTANCE_MESSAGE_FILTER_H_
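/*
 * A small illustrative sketch (plain C, not the ppapi API) of the dispatch
 * contract described by OnInstanceMessageReceived above: each filter in turn
 * gets a look at the message, the first one to return true stops the chain,
 * and an unhandled message is forwarded onward. All names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct { int type; } Message;
typedef bool (*MessageFilter)(const Message *msg);

static bool handles_type_1(const Message *m) { return m->type == 1; }
static bool handles_type_2(const Message *m) { return m->type == 2; }

/* Returns true as soon as one filter claims the message. */
static bool dispatch(const Message *m, const MessageFilter *filters, int n)
{
    for (int i = 0; i < n; i++)
        if (filters[i](m))
            return true;
    return false; /* unhandled: caller forwards to the next receiver */
}

int main(void)
{
    const MessageFilter chain[] = { handles_type_1, handles_type_2 };
    const Message m = { 2 };
    printf("handled: %d\n", dispatch(&m, chain, 2));
    return 0;
}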
null
null
null
null
41,338
37,849
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
37,849
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/* * Copyright (C) 2008, 2012 Apple Computer, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_GRAPHICS_GRADIENT_GENERATED_IMAGE_H_ #define THIRD_PARTY_BLINK_RENDERER_PLATFORM_GRAPHICS_GRADIENT_GENERATED_IMAGE_H_ #include "base/memory/scoped_refptr.h" #include "third_party/blink/renderer/platform/graphics/generated_image.h" #include "third_party/blink/renderer/platform/graphics/gradient.h" namespace blink { class PLATFORM_EXPORT GradientGeneratedImage final : public GeneratedImage { public: static scoped_refptr<GradientGeneratedImage> Create( scoped_refptr<Gradient> generator, const FloatSize& size) { return base::AdoptRef( new GradientGeneratedImage(std::move(generator), size)); } ~GradientGeneratedImage() override = default; bool ApplyShader(PaintFlags&, const SkMatrix&) override; protected: void Draw(PaintCanvas*, const PaintFlags&, const FloatRect&, const FloatRect&, RespectImageOrientationEnum, ImageClampingMode, ImageDecodingMode) override; void DrawTile(GraphicsContext&, const FloatRect&) override; GradientGeneratedImage(scoped_refptr<Gradient> generator, const FloatSize& size) : GeneratedImage(size), gradient_(std::move(generator)) {} scoped_refptr<Gradient> gradient_; }; } // namespace blink #endif
null
null
null
null
34,712
63,774
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
63,774
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_UI_WEBUI_INVALIDATIONS_UI_H_ #define CHROME_BROWSER_UI_WEBUI_INVALIDATIONS_UI_H_ #include "base/macros.h" #include "content/public/browser/web_ui_controller.h" // The implementation for the chrome://invalidations page. class InvalidationsUI : public content::WebUIController { public: explicit InvalidationsUI(content::WebUI* web_ui); ~InvalidationsUI() override; private: DISALLOW_COPY_AND_ASSIGN(InvalidationsUI); }; #endif // CHROME_BROWSER_UI_WEBUI_INVALIDATIONS_UI_H_
null
null
null
null
60,637
28,466
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
193,461
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* p80211conv.h
 *
 * Ether/802.11 conversions and packet buffer routines
 *
 * Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
 * --------------------------------------------------------------------
 *
 * linux-wlan
 *
 *   The contents of this file are subject to the Mozilla Public
 *   License Version 1.1 (the "License"); you may not use this file
 *   except in compliance with the License. You may obtain a copy of
 *   the License at http://www.mozilla.org/MPL/
 *
 *   Software distributed under the License is distributed on an "AS
 *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
 *   implied. See the License for the specific language governing
 *   rights and limitations under the License.
 *
 *   Alternatively, the contents of this file may be used under the
 *   terms of the GNU Public License version 2 (the "GPL"), in which
 *   case the provisions of the GPL are applicable instead of the
 *   above.  If you wish to allow the use of your version of this file
 *   only under the terms of the GPL and not to allow others to use
 *   your version of this file under the MPL, indicate your decision
 *   by deleting the provisions above and replace them with the notice
 *   and other provisions required by the GPL.  If you do not delete
 *   the provisions above, a recipient may use your version of this
 *   file under either the MPL or the GPL.
 *
 * --------------------------------------------------------------------
 *
 * Inquiries regarding the linux-wlan Open Source project can be
 * made directly to:
 *
 * AbsoluteValue Systems Inc.
 * info@linux-wlan.com
 * http://www.linux-wlan.com
 *
 * --------------------------------------------------------------------
 *
 * Portions of the development of this software were funded by
 * Intersil Corporation as part of PRISM(R) chipset product development.
 *
 * --------------------------------------------------------------------
 *
 * This file declares the functions, types and macros that perform
 * Ethernet to/from 802.11 frame conversions.
 *
 * --------------------------------------------------------------------
 */

#ifndef _LINUX_P80211CONV_H
#define _LINUX_P80211CONV_H

#define WLAN_IEEE_OUI_LEN	3

#define WLAN_ETHCONV_ENCAP	1
#define WLAN_ETHCONV_8021h	3

#define P80211CAPTURE_VERSION	0x80211001

#define P80211_FRMMETA_MAGIC	0x802110

#define P80211SKB_FRMMETA(s) \
	(((((struct p80211_frmmeta *)((s)->cb))->magic) == \
		P80211_FRMMETA_MAGIC) ? \
		((struct p80211_frmmeta *)((s)->cb)) : \
		(NULL))

#define P80211SKB_RXMETA(s) \
	(P80211SKB_FRMMETA((s)) ? P80211SKB_FRMMETA((s))->rx : \
		((struct p80211_rxmeta *)(NULL)))

struct p80211_rxmeta {
	struct wlandevice *wlandev;

	u64 mactime;		/* Hi-rez MAC-supplied time value */
	u64 hosttime;		/* Best-rez host supplied time value */

	unsigned int rxrate;	/* Receive data rate in 100kbps */
	unsigned int priority;	/* 0-15, 0=contention, 6=CF */
	int signal;		/* An SSI, see p80211netdev.h */
	int noise;		/* An SSI, see p80211netdev.h */
	unsigned int channel;	/* Receive channel (mostly for sniffing) */
	unsigned int preamble;	/* P80211ENUM_preambletype_* */
	unsigned int encoding;	/* P80211ENUM_encoding_* */
};

struct p80211_frmmeta {
	unsigned int magic;
	struct p80211_rxmeta *rx;
};

void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb);
int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb);
void p80211skb_rxmeta_detach(struct sk_buff *skb);

/*
 * Frame capture header.
(See doc/capturefrm.txt) */ struct p80211_caphdr { u32 version; u32 length; u64 mactime; u64 hosttime; u32 phytype; u32 channel; u32 datarate; u32 antenna; u32 priority; u32 ssi_type; s32 ssi_signal; s32 ssi_noise; u32 preamble; u32 encoding; }; /* buffer free method pointer type */ typedef void (*freebuf_method_t) (void *buf, int size); struct p80211_metawep { void *data; u8 iv[4]; u8 icv[4]; }; /* local ether header type */ struct wlan_ethhdr { u8 daddr[ETH_ALEN]; u8 saddr[ETH_ALEN]; __be16 type; } __packed; /* local llc header type */ struct wlan_llc { u8 dsap; u8 ssap; u8 ctl; } __packed; /* local snap header type */ struct wlan_snap { u8 oui[WLAN_IEEE_OUI_LEN]; __be16 type; } __packed; /* Circular include trick */ struct wlandevice; int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv, struct sk_buff *skb); int skb_ether_to_p80211(struct wlandevice *wlandev, u32 ethconv, struct sk_buff *skb, union p80211_hdr *p80211_hdr, struct p80211_metawep *p80211_wep); int p80211_stt_findproto(u16 proto); #endif
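/*
 * A standalone sketch (plain C, illustrative names) of the magic-tagged
 * scratch-area pattern behind P80211SKB_FRMMETA above: the metadata lives in
 * a fixed per-buffer control block and is only trusted when a magic value
 * confirms this driver wrote it. struct fake_skb stands in for sk_buff.
 */
#include <stdio.h>
#include <string.h>

#define FRMMETA_MAGIC 0x802110

struct frmmeta {
	unsigned int magic;
	void *rx;		/* stands in for struct p80211_rxmeta * */
};

struct fake_skb {
	char cb[48];		/* per-buffer scratch space, as in sk_buff */
};

static struct frmmeta *get_frmmeta(struct fake_skb *skb)
{
	struct frmmeta *meta = (struct frmmeta *)skb->cb;

	/* Only trust the scratch area if the magic marker is present. */
	return meta->magic == FRMMETA_MAGIC ? meta : NULL;
}

int main(void)
{
	struct fake_skb skb;

	memset(&skb, 0, sizeof(skb));
	printf("before attach: %p\n", (void *)get_frmmeta(&skb));

	((struct frmmeta *)skb.cb)->magic = FRMMETA_MAGIC; /* attach meta */
	printf("after attach:  %p\n", (void *)get_frmmeta(&skb));
	return 0;
}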
null
null
null
null
101,808
26,050
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
191,045
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Alex Deucher */ #include <drm/drmP.h> #include "amdgpu.h" #include "amdgpu_trace.h" #include "sid.h" const u32 sdma_offsets[SDMA_MAX_INSTANCE] = { DMA0_REGISTER_OFFSET, DMA1_REGISTER_OFFSET }; static void si_dma_set_ring_funcs(struct amdgpu_device *adev); static void si_dma_set_buffer_funcs(struct amdgpu_device *adev); static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev); static void si_dma_set_irq_funcs(struct amdgpu_device *adev); static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring) { return ring->adev->wb.wb[ring->rptr_offs>>2]; } static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1; return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2; } static void si_dma_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1; WREG32(DMA_RB_WPTR + sdma_offsets[me], (lower_32_bits(ring->wptr) << 2) & 0x3fffc); } static void si_dma_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, unsigned vm_id, bool ctx_switch) { /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. * Pad as necessary with NOPs. */ while ((lower_32_bits(ring->wptr) & 7) != 5) amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0)); amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF)); } static void si_dma_ring_emit_hdp_flush(struct amdgpu_ring *ring) { amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); amdgpu_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL)); amdgpu_ring_write(ring, 1); } static void si_dma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) { amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); amdgpu_ring_write(ring, (0xf << 16) | (HDP_DEBUG0)); amdgpu_ring_write(ring, 1); } /** * si_dma_ring_emit_fence - emit a fence on the DMA ring * * @ring: amdgpu ring pointer * @fence: amdgpu fence object * * Add a DMA fence packet to the ring to write * the fence seq number and DMA trap packet to generate * an interrupt if needed (VI). 
*/ static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags) { bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; /* write the fence */ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0)); amdgpu_ring_write(ring, addr & 0xfffffffc); amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff)); amdgpu_ring_write(ring, seq); /* optionally write high bits as well */ if (write64bit) { addr += 4; amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0)); amdgpu_ring_write(ring, addr & 0xfffffffc); amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff)); amdgpu_ring_write(ring, upper_32_bits(seq)); } /* generate an interrupt */ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0)); } static void si_dma_stop(struct amdgpu_device *adev) { struct amdgpu_ring *ring; u32 rb_cntl; unsigned i; for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; /* dma0 */ rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]); rb_cntl &= ~DMA_RB_ENABLE; WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl); if (adev->mman.buffer_funcs_ring == ring) amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); ring->ready = false; } } static int si_dma_start(struct amdgpu_device *adev) { struct amdgpu_ring *ring; u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz; int i, r; uint64_t rptr_addr; for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0); WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); /* Set ring buffer size in dwords */ rb_bufsz = order_base_2(ring->ring_size / 4); rb_cntl = rb_bufsz << 1; #ifdef __BIG_ENDIAN rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; #endif WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl); /* Initialize the ring buffer's read and write pointers */ WREG32(DMA_RB_RPTR + sdma_offsets[i], 0); WREG32(DMA_RB_WPTR + sdma_offsets[i], 0); rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr)); WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF); rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE; WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8); /* enable DMA IBs */ ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE; #ifdef __BIG_ENDIAN ib_cntl |= DMA_IB_SWAP_ENABLE; #endif WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl); dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]); dma_cntl &= ~CTXEMPTY_INT_ENABLE; WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl); ring->wptr = 0; WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2); WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE); ring->ready = true; r = amdgpu_ring_test_ring(ring); if (r) { ring->ready = false; return r; } if (adev->mman.buffer_funcs_ring == ring) amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); } return 0; } /** * si_dma_ring_test_ring - simple async dma engine test * * @ring: amdgpu_ring structure holding ring information * * Test the DMA engine by writing using it to write an * value to memory. (VI). * Returns 0 for success, error for failure. 
*/ static int si_dma_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; unsigned i; unsigned index; int r; u32 tmp; u64 gpu_addr; r = amdgpu_wb_get(adev, &index); if (r) { dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); return r; } gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); r = amdgpu_ring_alloc(ring, 4); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); amdgpu_wb_free(adev, index); return r; } amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1)); amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); } if (i < adev->usec_timeout) { DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", ring->idx, tmp); r = -EINVAL; } amdgpu_wb_free(adev, index); return r; } /** * si_dma_ring_test_ib - test an IB on the DMA engine * * @ring: amdgpu_ring structure holding ring information * * Test a simple IB in the DMA ring (VI). * Returns 0 on success, error on failure. */ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct dma_fence *f = NULL; unsigned index; u32 tmp = 0; u64 gpu_addr; long r; r = amdgpu_wb_get(adev, &index); if (r) { dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; } gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err0; } ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1); ib.ptr[1] = lower_32_bits(gpu_addr); ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff; ib.ptr[3] = 0xDEADBEEF; ib.length_dw = 4; r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); if (r) goto err1; r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; goto err1; } else if (r < 0) { DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; } tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) { DRM_INFO("ib test on ring %d succeeded\n", ring->idx); r = 0; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } err1: amdgpu_ib_free(adev, &ib, NULL); dma_fence_put(f); err0: amdgpu_wb_free(adev, index); return r; } /** * cik_dma_vm_copy_pte - update PTEs by copying them from the GART * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry * @src: src addr to copy from * @count: number of page entries to update * * Update PTEs by copying them from the GART using DMA (SI). 
*/ static void si_dma_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count) { unsigned bytes = count * 8; ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, bytes); ib->ptr[ib->length_dw++] = lower_32_bits(pe); ib->ptr[ib->length_dw++] = lower_32_bits(src); ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff; } /** * si_dma_vm_write_pte - update PTEs by writing them manually * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry * @value: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes * * Update PTEs by writing them manually using DMA (SI). */ static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t value, unsigned count, uint32_t incr) { unsigned ndw = count * 2; ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw); ib->ptr[ib->length_dw++] = lower_32_bits(pe); ib->ptr[ib->length_dw++] = upper_32_bits(pe); for (; ndw > 0; ndw -= 2) { ib->ptr[ib->length_dw++] = lower_32_bits(value); ib->ptr[ib->length_dw++] = upper_32_bits(value); value += incr; } } /** * si_dma_vm_set_pte_pde - update the page tables using sDMA * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry * @addr: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes * @flags: access flags * * Update the page tables using sDMA (CIK). */ static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags) { uint64_t value; unsigned ndw; while (count) { ndw = count * 2; if (ndw > 0xFFFFE) ndw = 0xFFFFE; if (flags & AMDGPU_PTE_VALID) value = addr; else value = 0; /* for physically contiguous pages (vram) */ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); ib->ptr[ib->length_dw++] = pe; /* dst addr */ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */ ib->ptr[ib->length_dw++] = upper_32_bits(flags); ib->ptr[ib->length_dw++] = value; /* value */ ib->ptr[ib->length_dw++] = upper_32_bits(value); ib->ptr[ib->length_dw++] = incr; /* increment size */ ib->ptr[ib->length_dw++] = 0; pe += ndw * 4; addr += (ndw / 2) * incr; count -= ndw / 2; } } /** * si_dma_pad_ib - pad the IB to the required number of dw * * @ib: indirect buffer to fill with padding * */ static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { while (ib->length_dw & 0x7) ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0); } /** * cik_sdma_ring_emit_pipeline_sync - sync the pipeline * * @ring: amdgpu_ring pointer * * Make sure all previous operations are completed (CIK). */ static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring) { uint32_t seq = ring->fence_drv.sync_seq; uint64_t addr = ring->fence_drv.gpu_addr; /* wait for idle */ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) | (1 << 27)); /* Poll memory */ amdgpu_ring_write(ring, lower_32_bits(addr)); amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */ amdgpu_ring_write(ring, 0xffffffff); /* mask */ amdgpu_ring_write(ring, seq); /* value */ amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */ } /** * si_dma_ring_emit_vm_flush - cik vm flush using sDMA * * @ring: amdgpu_ring pointer * @vm: amdgpu_vm pointer * * Update the page table base and flush the VM TLB * using sDMA (VI). 
*/ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr) { amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); if (vm_id < 8) amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); else amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8))); amdgpu_ring_write(ring, pd_addr >> 12); /* bits 0-7 are the VM contexts0-7 */ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST)); amdgpu_ring_write(ring, 1 << vm_id); /* wait for invalidate to complete */ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0)); amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST); amdgpu_ring_write(ring, 0xff << 16); /* retry */ amdgpu_ring_write(ring, 1 << vm_id); /* mask */ amdgpu_ring_write(ring, 0); /* value */ amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */ } static int si_dma_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->sdma.num_instances = 2; si_dma_set_ring_funcs(adev); si_dma_set_buffer_funcs(adev); si_dma_set_vm_pte_funcs(adev); si_dma_set_irq_funcs(adev); return 0; } static int si_dma_sw_init(void *handle) { struct amdgpu_ring *ring; int r, i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* DMA0 trap event */ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq); if (r) return r; /* DMA1 trap event */ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 244, &adev->sdma.trap_irq_1); if (r) return r; for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; ring->ring_obj = NULL; ring->use_doorbell = false; sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, (i == 0) ? 
AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1); if (r) return r; } return r; } static int si_dma_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i; for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); return 0; } static int si_dma_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; return si_dma_start(adev); } static int si_dma_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; si_dma_stop(adev); return 0; } static int si_dma_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; return si_dma_hw_fini(adev); } static int si_dma_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; return si_dma_hw_init(adev); } static bool si_dma_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(SRBM_STATUS2); if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK)) return false; return true; } static int si_dma_wait_for_idle(void *handle) { unsigned i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { if (si_dma_is_idle(handle)) return 0; udelay(1); } return -ETIMEDOUT; } static int si_dma_soft_reset(void *handle) { DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n"); return 0; } static int si_dma_set_trap_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, enum amdgpu_interrupt_state state) { u32 sdma_cntl; switch (type) { case AMDGPU_SDMA_IRQ_TRAP0: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET); sdma_cntl &= ~TRAP_ENABLE; WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl); break; case AMDGPU_IRQ_STATE_ENABLE: sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET); sdma_cntl |= TRAP_ENABLE; WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl); break; default: break; } break; case AMDGPU_SDMA_IRQ_TRAP1: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET); sdma_cntl &= ~TRAP_ENABLE; WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl); break; case AMDGPU_IRQ_STATE_ENABLE: sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET); sdma_cntl |= TRAP_ENABLE; WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl); break; default: break; } break; default: break; } return 0; } static int si_dma_process_trap_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { amdgpu_fence_process(&adev->sdma.instance[0].ring); return 0; } static int si_dma_process_trap_irq_1(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { amdgpu_fence_process(&adev->sdma.instance[1].ring); return 0; } static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { DRM_ERROR("Illegal instruction in SDMA command stream\n"); schedule_work(&adev->reset_work); return 0; } static int si_dma_set_clockgating_state(void *handle, enum amd_clockgating_state state) { u32 orig, data, offset; int i; bool enable; struct amdgpu_device *adev = (struct amdgpu_device *)handle; enable = (state == AMD_CG_STATE_GATE) ? 
true : false; if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { for (i = 0; i < adev->sdma.num_instances; i++) { if (i == 0) offset = DMA0_REGISTER_OFFSET; else offset = DMA1_REGISTER_OFFSET; orig = data = RREG32(DMA_POWER_CNTL + offset); data &= ~MEM_POWER_OVERRIDE; if (data != orig) WREG32(DMA_POWER_CNTL + offset, data); WREG32(DMA_CLK_CTRL + offset, 0x00000100); } } else { for (i = 0; i < adev->sdma.num_instances; i++) { if (i == 0) offset = DMA0_REGISTER_OFFSET; else offset = DMA1_REGISTER_OFFSET; orig = data = RREG32(DMA_POWER_CNTL + offset); data |= MEM_POWER_OVERRIDE; if (data != orig) WREG32(DMA_POWER_CNTL + offset, data); orig = data = RREG32(DMA_CLK_CTRL + offset); data = 0xff000000; if (data != orig) WREG32(DMA_CLK_CTRL + offset, data); } } return 0; } static int si_dma_set_powergating_state(void *handle, enum amd_powergating_state state) { u32 tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; WREG32(DMA_PGFSM_WRITE, 0x00002000); WREG32(DMA_PGFSM_CONFIG, 0x100010ff); for (tmp = 0; tmp < 5; tmp++) WREG32(DMA_PGFSM_WRITE, 0); return 0; } static const struct amd_ip_funcs si_dma_ip_funcs = { .name = "si_dma", .early_init = si_dma_early_init, .late_init = NULL, .sw_init = si_dma_sw_init, .sw_fini = si_dma_sw_fini, .hw_init = si_dma_hw_init, .hw_fini = si_dma_hw_fini, .suspend = si_dma_suspend, .resume = si_dma_resume, .is_idle = si_dma_is_idle, .wait_for_idle = si_dma_wait_for_idle, .soft_reset = si_dma_soft_reset, .set_clockgating_state = si_dma_set_clockgating_state, .set_powergating_state = si_dma_set_powergating_state, }; static const struct amdgpu_ring_funcs si_dma_ring_funcs = { .type = AMDGPU_RING_TYPE_SDMA, .align_mask = 0xf, .nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), .support_64bit_ptrs = false, .get_rptr = si_dma_ring_get_rptr, .get_wptr = si_dma_ring_get_wptr, .set_wptr = si_dma_ring_set_wptr, .emit_frame_size = 3 + /* si_dma_ring_emit_hdp_flush */ 3 + /* si_dma_ring_emit_hdp_invalidate */ 6 + /* si_dma_ring_emit_pipeline_sync */ 12 + /* si_dma_ring_emit_vm_flush */ 9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */ .emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */ .emit_ib = si_dma_ring_emit_ib, .emit_fence = si_dma_ring_emit_fence, .emit_pipeline_sync = si_dma_ring_emit_pipeline_sync, .emit_vm_flush = si_dma_ring_emit_vm_flush, .emit_hdp_flush = si_dma_ring_emit_hdp_flush, .emit_hdp_invalidate = si_dma_ring_emit_hdp_invalidate, .test_ring = si_dma_ring_test_ring, .test_ib = si_dma_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = si_dma_ring_pad_ib, }; static void si_dma_set_ring_funcs(struct amdgpu_device *adev) { int i; for (i = 0; i < adev->sdma.num_instances; i++) adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs; } static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = { .set = si_dma_set_trap_irq_state, .process = si_dma_process_trap_irq, }; static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs_1 = { .set = si_dma_set_trap_irq_state, .process = si_dma_process_trap_irq_1, }; static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = { .process = si_dma_process_illegal_inst_irq, }; static void si_dma_set_irq_funcs(struct amdgpu_device *adev) { adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs; adev->sdma.trap_irq_1.funcs = &si_dma_trap_irq_funcs_1; adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs; } /** * si_dma_emit_copy_buffer - copy buffer using the sDMA engine * * @ring: amdgpu_ring structure 
holding ring information * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer * * Copy GPU buffers using the DMA engine (VI). * Used by the amdgpu ttm implementation to move pages if * registered as the asic copy callback. */ static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count) { ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, byte_count); ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff; ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff; } /** * si_dma_emit_fill_buffer - fill buffer using the sDMA engine * * @ring: amdgpu_ring structure holding ring information * @src_data: value to write to buffer * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer * * Fill GPU buffers using the DMA engine (VI). */ static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib, uint32_t src_data, uint64_t dst_offset, uint32_t byte_count) { ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL, 0, 0, 0, byte_count / 4); ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); ib->ptr[ib->length_dw++] = src_data; ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16; } static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = { .copy_max_bytes = 0xffff8, .copy_num_dw = 5, .emit_copy_buffer = si_dma_emit_copy_buffer, .fill_max_bytes = 0xffff8, .fill_num_dw = 4, .emit_fill_buffer = si_dma_emit_fill_buffer, }; static void si_dma_set_buffer_funcs(struct amdgpu_device *adev) { if (adev->mman.buffer_funcs == NULL) { adev->mman.buffer_funcs = &si_dma_buffer_funcs; adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; } } static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = { .copy_pte = si_dma_vm_copy_pte, .write_pte = si_dma_vm_write_pte, .set_pte_pde = si_dma_vm_set_pte_pde, }; static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev) { unsigned i; if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs; for (i = 0; i < adev->sdma.num_instances; i++) adev->vm_manager.vm_pte_rings[i] = &adev->sdma.instance[i].ring; adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; } } const struct amdgpu_ip_block_version si_dma_ip_block = { .type = AMD_IP_BLOCK_TYPE_SDMA, .major = 1, .minor = 0, .rev = 0, .funcs = &si_dma_ip_funcs, };
null
null
null
null
99,392
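The si_dma code above keeps indirect-buffer packets 8-dword aligned by padding the ring with NOPs until (wptr & 7) == 5, so that the 3-dword IB packet which follows ends exactly on an 8-dword boundary. The standalone C sketch below illustrates that invariant with a mock ring; the mock_ring type, the MOCK_NOP value and the 0x4 opcode are assumptions for illustration, not the amdgpu API.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define MOCK_NOP 0u  /* stand-in for DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0) */

struct mock_ring {               /* invented for this sketch */
    uint32_t buf[64];
    uint32_t wptr;               /* write pointer, in dwords */
};

static void mock_write(struct mock_ring *r, uint32_t v)
{
    r->buf[r->wptr++ & 63] = v;
}

/* Pad with NOPs until (wptr & 7) == 5; the 3-dword IB packet that
 * follows then ends on an 8-dword boundary (5 + 3 == 8). */
static void emit_ib(struct mock_ring *r, uint64_t ib_addr, uint32_t len_dw)
{
    while ((r->wptr & 7) != 5)
        mock_write(r, MOCK_NOP);
    mock_write(r, 0x4u << 28);                        /* assumed IB opcode */
    mock_write(r, (uint32_t)ib_addr & 0xFFFFFFE0);    /* 32-byte aligned base */
    mock_write(r, (len_dw << 12) | (uint32_t)((ib_addr >> 32) & 0xFF));
    assert((r->wptr & 7) == 0);                       /* packet ends aligned */
}

int main(void)
{
    struct mock_ring r = { {0}, 3 };                  /* deliberately misaligned */
    emit_ib(&r, 0x100000ULL, 16);
    printf("wptr after IB: %u (wptr %% 8 = %u)\n", r.wptr, r.wptr & 7);
    return 0;
}

Starting from wptr = 3, two NOPs bring the pointer to 5, and the three IB dwords land it on 8, which is what the driver's padding loop guarantees for every submission.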
6,970
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
6,970
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef NET_QUIC_CHROMIUM_QUIC_SESSION_KEY_H_ #define NET_QUIC_CHROMIUM_QUIC_SESSION_KEY_H_ #include "net/quic/core/quic_server_id.h" #include "net/socket/socket_tag.h" namespace net { // The key used to identify sessions. Includes the QuicServerId and socket tag. class QUIC_EXPORT_PRIVATE QuicSessionKey { public: QuicSessionKey() = default; QuicSessionKey(const HostPortPair& host_port_pair, PrivacyMode privacy_mode, const SocketTag& socket_tag); QuicSessionKey(const std::string& host, uint16_t port, PrivacyMode privacy_mode, const SocketTag& socket_tag); QuicSessionKey(const QuicServerId& server_id, const SocketTag& socket_tag); ~QuicSessionKey() = default; // Needed to be an element of std::set. bool operator<(const QuicSessionKey& other) const; bool operator==(const QuicSessionKey& other) const; const std::string& host() const { return server_id_.host(); } PrivacyMode privacy_mode() const { return server_id_.privacy_mode(); } const QuicServerId& server_id() const { return server_id_; } SocketTag socket_tag() const { return socket_tag_; } size_t EstimateMemoryUsage() const; private: QuicServerId server_id_; SocketTag socket_tag_; }; } // namespace net #endif // NET_QUIC_CHROMIUM_QUIC_SESSION_KEY_H_
null
null
null
null
3,833
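QuicSessionKey above exists so that (host, port, privacy mode, socket tag) tuples can key an ordered container: operator< supplies a strict weak ordering and operator== decides whether two requests may share a session. A rough C rendering of the same idea follows, with an invented field layout and a qsort()-style comparator standing in for operator<.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct session_key {             /* field layout invented for illustration */
    char     host[64];
    uint16_t port;
    int      privacy_mode;       /* stand-in for PrivacyMode */
    uint32_t socket_tag;         /* stand-in for SocketTag */
};

/* Lexicographic compare over all fields: a strict weak ordering, so the
 * keys can live in a sorted array (or, in the C++ original, a std::set). */
static int session_key_cmp(const void *pa, const void *pb)
{
    const struct session_key *a = pa, *b = pb;
    int c = strcmp(a->host, b->host);
    if (c) return c;
    if (a->port != b->port) return a->port < b->port ? -1 : 1;
    if (a->privacy_mode != b->privacy_mode)
        return a->privacy_mode < b->privacy_mode ? -1 : 1;
    if (a->socket_tag != b->socket_tag)
        return a->socket_tag < b->socket_tag ? -1 : 1;
    return 0;                    /* equal keys: requests may share a session */
}

int main(void)
{
    struct session_key k1 = { "example.org", 443, 0, 0 };
    struct session_key k2 = { "example.org", 443, 0, 7 };
    /* identical except for the socket tag, so they must not share a session */
    printf("cmp = %d\n", session_key_cmp(&k1, &k2));
    return 0;
}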
23,661
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
188,656
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * linux/drivers/video/riva/fbdev-i2c.c - nVidia i2c * * Maintained by Ani Joshi <ajoshi@shell.unixbox.com> * * Copyright 2004 Antonino A. Daplas <adaplas @pol.net> * * Based on radeonfb-i2c.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/fb.h> #include <linux/jiffies.h> #include <asm/io.h> #include "rivafb.h" #include "../edid.h" static void riva_gpio_setscl(void* data, int state) { struct riva_i2c_chan *chan = data; struct riva_par *par = chan->par; u32 val; VGA_WR08(par->riva.PCIO, 0x3d4, chan->ddc_base + 1); val = VGA_RD08(par->riva.PCIO, 0x3d5) & 0xf0; if (state) val |= 0x20; else val &= ~0x20; VGA_WR08(par->riva.PCIO, 0x3d4, chan->ddc_base + 1); VGA_WR08(par->riva.PCIO, 0x3d5, val | 0x1); } static void riva_gpio_setsda(void* data, int state) { struct riva_i2c_chan *chan = data; struct riva_par *par = chan->par; u32 val; VGA_WR08(par->riva.PCIO, 0x3d4, chan->ddc_base + 1); val = VGA_RD08(par->riva.PCIO, 0x3d5) & 0xf0; if (state) val |= 0x10; else val &= ~0x10; VGA_WR08(par->riva.PCIO, 0x3d4, chan->ddc_base + 1); VGA_WR08(par->riva.PCIO, 0x3d5, val | 0x1); } static int riva_gpio_getscl(void* data) { struct riva_i2c_chan *chan = data; struct riva_par *par = chan->par; u32 val = 0; VGA_WR08(par->riva.PCIO, 0x3d4, chan->ddc_base); if (VGA_RD08(par->riva.PCIO, 0x3d5) & 0x04) val = 1; return val; } static int riva_gpio_getsda(void* data) { struct riva_i2c_chan *chan = data; struct riva_par *par = chan->par; u32 val = 0; VGA_WR08(par->riva.PCIO, 0x3d4, chan->ddc_base); if (VGA_RD08(par->riva.PCIO, 0x3d5) & 0x08) val = 1; return val; } static int riva_setup_i2c_bus(struct riva_i2c_chan *chan, const char *name, unsigned int i2c_class) { int rc; strcpy(chan->adapter.name, name); chan->adapter.owner = THIS_MODULE; chan->adapter.class = i2c_class; chan->adapter.algo_data = &chan->algo; chan->adapter.dev.parent = &chan->par->pdev->dev; chan->algo.setsda = riva_gpio_setsda; chan->algo.setscl = riva_gpio_setscl; chan->algo.getsda = riva_gpio_getsda; chan->algo.getscl = riva_gpio_getscl; chan->algo.udelay = 40; chan->algo.timeout = msecs_to_jiffies(2); chan->algo.data = chan; i2c_set_adapdata(&chan->adapter, chan); /* Raise SCL and SDA */ riva_gpio_setsda(chan, 1); riva_gpio_setscl(chan, 1); udelay(20); rc = i2c_bit_add_bus(&chan->adapter); if (rc == 0) dev_dbg(&chan->par->pdev->dev, "I2C bus %s registered.\n", name); else { dev_warn(&chan->par->pdev->dev, "Failed to register I2C bus %s.\n", name); chan->par = NULL; } return rc; } void riva_create_i2c_busses(struct riva_par *par) { par->chan[0].par = par; par->chan[1].par = par; par->chan[2].par = par; par->chan[0].ddc_base = 0x36; par->chan[1].ddc_base = 0x3e; par->chan[2].ddc_base = 0x50; riva_setup_i2c_bus(&par->chan[0], "BUS1", I2C_CLASS_HWMON); riva_setup_i2c_bus(&par->chan[1], "BUS2", 0); riva_setup_i2c_bus(&par->chan[2], "BUS3", 0); } void riva_delete_i2c_busses(struct riva_par *par) { int i; for (i = 0; i < 3; i++) { if (!par->chan[i].par) continue; i2c_del_adapter(&par->chan[i].adapter); par->chan[i].par = NULL; } } int riva_probe_i2c_connector(struct riva_par *par, int conn, u8 **out_edid) { u8 *edid = NULL; if (par->chan[conn].par) edid = fb_ddc_read(&par->chan[conn].adapter); if (out_edid) *out_edid = edid; if (!edid) return 1; return 0; }
null
null
null
null
97,003
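The riva_gpio_* helpers above are just the four callbacks (set/get for SCL and SDA) that the i2c-algo-bit layer needs to bit-bang a DDC bus. The self-contained C sketch below shows the signalling those callbacks implement; two plain ints stand in for real GPIO registers, and the udelay-style timing the kernel inserts between transitions is omitted.

#include <stdio.h>

static int scl_state = 1, sda_state = 1;   /* mock open-drain bus lines */

static void setscl(void *data, int state) { (void)data; scl_state = state; }
static void setsda(void *data, int state) { (void)data; sda_state = state; }
static int  getscl(void *data)            { (void)data; return scl_state; }
static int  getsda(void *data)            { (void)data; return sda_state; }

/* I2C START condition: SDA falls while SCL is held high. */
static void i2c_start(void)
{
    setsda(NULL, 1);
    setscl(NULL, 1);
    setsda(NULL, 0);
    setscl(NULL, 0);
}

/* Clock one data bit out: change SDA while SCL is low, then pulse SCL. */
static void i2c_write_bit(int bit)
{
    setsda(NULL, bit);
    setscl(NULL, 1);
    setscl(NULL, 0);
}

int main(void)
{
    i2c_start();
    i2c_write_bit(1);
    printf("SCL=%d SDA=%d after START + one bit\n", getscl(NULL), getsda(NULL));
    return 0;
}

This is why riva_setup_i2c_bus only has to fill in chan->algo.setsda/setscl/getsda/getscl and raise both lines before registering: the bit-banging protocol itself lives in the shared algorithm layer.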
1,751
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
154,808
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/* * AC-3 tables * copyright (c) 2001 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * tables taken directly from the AC-3 spec. */ #include "libavutil/channel_layout.h" #include "libavutil/mem.h" #include "avcodec.h" #include "ac3tab.h" /** * Possible frame sizes. * from ATSC A/52 Table 5.18 Frame Size Code Table. */ const uint16_t ff_ac3_frame_size_tab[38][3] = { { 64, 69, 96 }, { 64, 70, 96 }, { 80, 87, 120 }, { 80, 88, 120 }, { 96, 104, 144 }, { 96, 105, 144 }, { 112, 121, 168 }, { 112, 122, 168 }, { 128, 139, 192 }, { 128, 140, 192 }, { 160, 174, 240 }, { 160, 175, 240 }, { 192, 208, 288 }, { 192, 209, 288 }, { 224, 243, 336 }, { 224, 244, 336 }, { 256, 278, 384 }, { 256, 279, 384 }, { 320, 348, 480 }, { 320, 349, 480 }, { 384, 417, 576 }, { 384, 418, 576 }, { 448, 487, 672 }, { 448, 488, 672 }, { 512, 557, 768 }, { 512, 558, 768 }, { 640, 696, 960 }, { 640, 697, 960 }, { 768, 835, 1152 }, { 768, 836, 1152 }, { 896, 975, 1344 }, { 896, 976, 1344 }, { 1024, 1114, 1536 }, { 1024, 1115, 1536 }, { 1152, 1253, 1728 }, { 1152, 1254, 1728 }, { 1280, 1393, 1920 }, { 1280, 1394, 1920 }, }; /** * Map audio coding mode (acmod) to number of full-bandwidth channels. * from ATSC A/52 Table 5.8 Audio Coding Mode */ const uint8_t ff_ac3_channels_tab[8] = { 2, 1, 2, 3, 3, 4, 4, 5 }; /** * Map audio coding mode (acmod) to channel layout mask. */ const uint16_t avpriv_ac3_channel_layout_tab[8] = { AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_MONO, AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_SURROUND, AV_CH_LAYOUT_2_1, AV_CH_LAYOUT_4POINT0, AV_CH_LAYOUT_2_2, AV_CH_LAYOUT_5POINT0 }; #define COMMON_CHANNEL_MAP \ { { 0, 1, }, { 0, 1, 2, } },\ { { 0, }, { 0, 1, } },\ { { 0, 1, }, { 0, 1, 2, } },\ { { 0, 2, 1, }, { 0, 2, 1, 3, } },\ { { 0, 1, 2, }, { 0, 1, 3, 2, } },\ { { 0, 2, 1, 3, }, { 0, 2, 1, 4, 3, } }, /** * Table to remap channels from SMPTE order to AC-3 order. * [channel_mode][lfe][ch] */ const uint8_t ff_ac3_enc_channel_map[8][2][6] = { COMMON_CHANNEL_MAP { { 0, 1, 2, 3, }, { 0, 1, 3, 4, 2, } }, { { 0, 2, 1, 3, 4, }, { 0, 2, 1, 4, 5, 3 } }, }; /** * Table to remap channels from AC-3 order to SMPTE order. 
* [channel_mode][lfe][ch] */ const uint8_t ff_ac3_dec_channel_map[8][2][6] = { COMMON_CHANNEL_MAP { { 0, 1, 2, 3, }, { 0, 1, 4, 2, 3, } }, { { 0, 2, 1, 3, 4, }, { 0, 2, 1, 5, 3, 4 } }, }; /* possible frequencies */ const uint16_t ff_ac3_sample_rate_tab[3] = { 48000, 44100, 32000 }; /* possible bitrates */ const uint16_t ff_ac3_bitrate_tab[19] = { 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448, 512, 576, 640 }; /** * Table of bin locations for rematrixing bands * reference: Section 7.5.2 Rematrixing : Frequency Band Definitions */ const uint8_t ff_ac3_rematrix_band_tab[5] = { 13, 25, 37, 61, 253 }; /** * Table E2.16 Default Coupling Banding Structure */ const uint8_t ff_eac3_default_cpl_band_struct[18] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1 }; /* AC-3 MDCT window */ /* MDCT window */ DECLARE_ALIGNED(16, const int16_t, ff_ac3_window)[AC3_WINDOW_SIZE/2] = { 4, 7, 12, 16, 21, 28, 34, 42, 51, 61, 72, 84, 97, 111, 127, 145, 164, 184, 207, 231, 257, 285, 315, 347, 382, 419, 458, 500, 544, 591, 641, 694, 750, 810, 872, 937, 1007, 1079, 1155, 1235, 1318, 1406, 1497, 1593, 1692, 1796, 1903, 2016, 2132, 2253, 2379, 2509, 2644, 2783, 2927, 3076, 3230, 3389, 3552, 3721, 3894, 4072, 4255, 4444, 4637, 4835, 5038, 5246, 5459, 5677, 5899, 6127, 6359, 6596, 6837, 7083, 7334, 7589, 7848, 8112, 8380, 8652, 8927, 9207, 9491, 9778,10069,10363, 10660,10960,11264,11570,11879,12190,12504,12820, 13138,13458,13780,14103,14427,14753,15079,15407, 15735,16063,16392,16720,17049,17377,17705,18032, 18358,18683,19007,19330,19651,19970,20287,20602, 20914,21225,21532,21837,22139,22438,22733,23025, 23314,23599,23880,24157,24430,24699,24964,25225, 25481,25732,25979,26221,26459,26691,26919,27142, 27359,27572,27780,27983,28180,28373,28560,28742, 28919,29091,29258,29420,29577,29729,29876,30018, 30155,30288,30415,30538,30657,30771,30880,30985, 31086,31182,31274,31363,31447,31528,31605,31678, 31747,31814,31877,31936,31993,32046,32097,32145, 32190,32232,32272,32310,32345,32378,32409,32438, 32465,32490,32513,32535,32556,32574,32592,32608, 32623,32636,32649,32661,32671,32681,32690,32698, 32705,32712,32718,32724,32729,32733,32737,32741, 32744,32747,32750,32752,32754,32756,32757,32759, 32760,32761,32762,32763,32764,32764,32765,32765, 32766,32766,32766,32766,32767,32767,32767,32767, 32767,32767,32767,32767,32767,32767,32767,32767, 32767,32767,32767,32767,32767,32767,32767,32767, }; const uint8_t ff_ac3_log_add_tab[260]= { 0x40,0x3f,0x3e,0x3d,0x3c,0x3b,0x3a,0x39,0x38,0x37, 0x36,0x35,0x34,0x34,0x33,0x32,0x31,0x30,0x2f,0x2f, 0x2e,0x2d,0x2c,0x2c,0x2b,0x2a,0x29,0x29,0x28,0x27, 0x26,0x26,0x25,0x24,0x24,0x23,0x23,0x22,0x21,0x21, 0x20,0x20,0x1f,0x1e,0x1e,0x1d,0x1d,0x1c,0x1c,0x1b, 0x1b,0x1a,0x1a,0x19,0x19,0x18,0x18,0x17,0x17,0x16, 0x16,0x15,0x15,0x15,0x14,0x14,0x13,0x13,0x13,0x12, 0x12,0x12,0x11,0x11,0x11,0x10,0x10,0x10,0x0f,0x0f, 0x0f,0x0e,0x0e,0x0e,0x0d,0x0d,0x0d,0x0d,0x0c,0x0c, 0x0c,0x0c,0x0b,0x0b,0x0b,0x0b,0x0a,0x0a,0x0a,0x0a, 0x0a,0x09,0x09,0x09,0x09,0x09,0x08,0x08,0x08,0x08, 0x08,0x08,0x07,0x07,0x07,0x07,0x07,0x07,0x06,0x06, 0x06,0x06,0x06,0x06,0x06,0x06,0x05,0x05,0x05,0x05, 0x05,0x05,0x05,0x05,0x04,0x04,0x04,0x04,0x04,0x04, 0x04,0x04,0x04,0x04,0x04,0x03,0x03,0x03,0x03,0x03, 0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x02, 0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02, 0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x01,0x01, 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, }; const uint16_t ff_ac3_hearing_threshold_tab[AC3_CRITICAL_BANDS][3]= { { 0x04d0,0x04f0,0x0580 }, { 0x04d0,0x04f0,0x0580 }, { 0x0440,0x0460,0x04b0 }, { 0x0400,0x0410,0x0450 }, { 0x03e0,0x03e0,0x0420 }, { 0x03c0,0x03d0,0x03f0 }, { 0x03b0,0x03c0,0x03e0 }, { 0x03b0,0x03b0,0x03d0 }, { 0x03a0,0x03b0,0x03c0 }, { 0x03a0,0x03a0,0x03b0 }, { 0x03a0,0x03a0,0x03b0 }, { 0x03a0,0x03a0,0x03b0 }, { 0x03a0,0x03a0,0x03a0 }, { 0x0390,0x03a0,0x03a0 }, { 0x0390,0x0390,0x03a0 }, { 0x0390,0x0390,0x03a0 }, { 0x0380,0x0390,0x03a0 }, { 0x0380,0x0380,0x03a0 }, { 0x0370,0x0380,0x03a0 }, { 0x0370,0x0380,0x03a0 }, { 0x0360,0x0370,0x0390 }, { 0x0360,0x0370,0x0390 }, { 0x0350,0x0360,0x0390 }, { 0x0350,0x0360,0x0390 }, { 0x0340,0x0350,0x0380 }, { 0x0340,0x0350,0x0380 }, { 0x0330,0x0340,0x0380 }, { 0x0320,0x0340,0x0370 }, { 0x0310,0x0320,0x0360 }, { 0x0300,0x0310,0x0350 }, { 0x02f0,0x0300,0x0340 }, { 0x02f0,0x02f0,0x0330 }, { 0x02f0,0x02f0,0x0320 }, { 0x02f0,0x02f0,0x0310 }, { 0x0300,0x02f0,0x0300 }, { 0x0310,0x0300,0x02f0 }, { 0x0340,0x0320,0x02f0 }, { 0x0390,0x0350,0x02f0 }, { 0x03e0,0x0390,0x0300 }, { 0x0420,0x03e0,0x0310 }, { 0x0460,0x0420,0x0330 }, { 0x0490,0x0450,0x0350 }, { 0x04a0,0x04a0,0x03c0 }, { 0x0460,0x0490,0x0410 }, { 0x0440,0x0460,0x0470 }, { 0x0440,0x0440,0x04a0 }, { 0x0520,0x0480,0x0460 }, { 0x0800,0x0630,0x0440 }, { 0x0840,0x0840,0x0450 }, { 0x0840,0x0840,0x04e0 }, }; const uint8_t ff_ac3_bap_tab[64]= { 0, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, }; const uint8_t ff_ac3_slow_decay_tab[4]={ 0x0f, 0x11, 0x13, 0x15, }; const uint8_t ff_ac3_fast_decay_tab[4]={ 0x3f, 0x53, 0x67, 0x7b, }; const uint16_t ff_ac3_slow_gain_tab[4]= { 0x540, 0x4d8, 0x478, 0x410, }; const uint16_t ff_ac3_db_per_bit_tab[4]= { 0x000, 0x700, 0x900, 0xb00, }; const int16_t ff_ac3_floor_tab[8]= { 0x2f0, 0x2b0, 0x270, 0x230, 0x1f0, 0x170, 0x0f0, 0xf800, }; const uint16_t ff_ac3_fast_gain_tab[8]= { 0x080, 0x100, 0x180, 0x200, 0x280, 0x300, 0x380, 0x400, }; /** * Default channel map for a dependent substream defined by acmod */ const uint16_t ff_eac3_default_chmap[8] = { AC3_CHMAP_L | AC3_CHMAP_R, // FIXME Ch1+Ch2 AC3_CHMAP_C, AC3_CHMAP_L | AC3_CHMAP_R, AC3_CHMAP_L | AC3_CHMAP_C | AC3_CHMAP_R, AC3_CHMAP_L | AC3_CHMAP_R | AC3_CHMAP_C_SUR, AC3_CHMAP_L | AC3_CHMAP_C | AC3_CHMAP_R | AC3_CHMAP_C_SUR, AC3_CHMAP_L | AC3_CHMAP_R | AC3_CHMAP_L_SUR | AC3_CHMAP_R_SUR, AC3_CHMAP_L | AC3_CHMAP_C | AC3_CHMAP_R | AC3_CHMAP_L_SUR | AC3_CHMAP_R_SUR };
null
null
null
null
70,863
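ff_ac3_frame_size_tab stores frame sizes in 16-bit words, one column per sample rate (48, 44.1 and 32 kHz), so the frame length in bytes is twice the table entry. A minimal lookup sketch, reproducing only the first rows of the table as an excerpt:

#include <stdint.h>
#include <stdio.h>

/* Excerpt of ff_ac3_frame_size_tab: frame size in 16-bit words,
 * indexed by [frame_size_code][sample_rate_code]. */
static const uint16_t frame_size_tab[][3] = {
    {  64,  69,  96 }, {  64,  70,  96 },
    {  80,  87, 120 }, {  80,  88, 120 },
    {  96, 104, 144 }, {  96, 105, 144 },
};
static const uint16_t sample_rate_tab[3] = { 48000, 44100, 32000 };

static int ac3_frame_bytes(unsigned frame_size_code, unsigned sr_code)
{
    if (frame_size_code >= sizeof(frame_size_tab) / sizeof(frame_size_tab[0]) ||
        sr_code >= 3)
        return -1;                               /* invalid codes */
    return 2 * frame_size_tab[frame_size_code][sr_code];
}

int main(void)
{
    /* frmsizecod 2 at 44.1 kHz: 87 words, i.e. 174 bytes */
    printf("rate=%u Hz frame=%d bytes\n",
           sample_rate_tab[1], ac3_frame_bytes(2, 1));
    return 0;
}

The odd/even row pairs in the full table exist because at 44.1 kHz the frame length alternates by one word to hit the target bitrate exactly, which is why consecutive rows differ only in the middle column.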
5,200
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
170,195
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Apple Onboard Audio GPIO definitions * * Copyright 2006 Johannes Berg <johannes@sipsolutions.net> * * GPL v2, can be found in COPYING. */ #ifndef __AOA_GPIO_H #define __AOA_GPIO_H #include <linux/workqueue.h> #include <linux/mutex.h> #include <asm/prom.h> typedef void (*notify_func_t)(void *data); enum notify_type { AOA_NOTIFY_HEADPHONE, AOA_NOTIFY_LINE_IN, AOA_NOTIFY_LINE_OUT, }; struct gpio_runtime; struct gpio_methods { /* for initialisation/de-initialisation of the GPIO layer */ void (*init)(struct gpio_runtime *rt); void (*exit)(struct gpio_runtime *rt); /* turn off headphone, speakers, lineout */ void (*all_amps_off)(struct gpio_runtime *rt); /* turn headphone, speakers, lineout back to previous setting */ void (*all_amps_restore)(struct gpio_runtime *rt); void (*set_headphone)(struct gpio_runtime *rt, int on); void (*set_speakers)(struct gpio_runtime *rt, int on); void (*set_lineout)(struct gpio_runtime *rt, int on); void (*set_master)(struct gpio_runtime *rt, int on); int (*get_headphone)(struct gpio_runtime *rt); int (*get_speakers)(struct gpio_runtime *rt); int (*get_lineout)(struct gpio_runtime *rt); int (*get_master)(struct gpio_runtime *rt); void (*set_hw_reset)(struct gpio_runtime *rt, int on); /* use this to be notified of any events. The notification * function is passed the data, and is called in process * context by the use of schedule_work. * The interface for it is that setting a function to NULL * removes it, and they return 0 if the operation succeeded, * and -EBUSY if the notification is already assigned by * someone else. */ int (*set_notify)(struct gpio_runtime *rt, enum notify_type type, notify_func_t notify, void *data); /* returns 0 if not plugged in, 1 if plugged in * or a negative error code */ int (*get_detect)(struct gpio_runtime *rt, enum notify_type type); }; struct gpio_notification { struct delayed_work work; notify_func_t notify; void *data; void *gpio_private; struct mutex mutex; }; struct gpio_runtime { /* to be assigned by fabric */ struct device_node *node; /* since everyone needs this pointer anyway... */ struct gpio_methods *methods; /* to be used by the gpio implementation */ int implementation_private; struct gpio_notification headphone_notify; struct gpio_notification line_in_notify; struct gpio_notification line_out_notify; }; #endif /* __AOA_GPIO_H */
null
null
null
null
78,542
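The set_notify contract spelled out in the header comment above — a non-NULL function installs a handler, NULL removes it, and -EBUSY is returned when another client already owns the slot — is easy to miss in prose. Here is a minimal single-threaded C sketch of that contract; the real driver additionally serializes access through gpio_notification.mutex and dispatches via schedule_work.

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

typedef void (*notify_func_t)(void *data);

struct notification {            /* simplified gpio_notification */
    notify_func_t notify;
    void *data;
};

static int set_notify(struct notification *n, notify_func_t fn, void *data)
{
    if (fn == NULL) {                  /* NULL removes the handler */
        n->notify = NULL;
        n->data   = NULL;
        return 0;
    }
    if (n->notify && n->notify != fn)  /* already claimed by someone else */
        return -EBUSY;
    n->notify = fn;
    n->data   = data;
    return 0;
}

static void on_headphone(void *data) { printf("headphone event: %s\n", (char *)data); }
static void other_client(void *data) { (void)data; }

int main(void)
{
    struct notification hp = { NULL, NULL };
    printf("install: %d\n", set_notify(&hp, on_headphone, "plugged"));
    printf("steal:   %d (expected -EBUSY = %d)\n",
           set_notify(&hp, other_client, NULL), -EBUSY);
    if (hp.notify)
        hp.notify(hp.data);            /* deliver a fake event */
    return 0;
}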
14,409
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
179,404
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef __iop_fifo_out_extra_defs_h #define __iop_fifo_out_extra_defs_h /* * This file is autogenerated from * file: ../../inst/io_proc/rtl/iop_fifo_out_extra.r * id: <not found> * last modfied: Mon Apr 11 16:10:10 2005 * * by /n/asic/design/tools/rdesc/src/rdes2c --outfile iop_fifo_out_extra_defs.h ../../inst/io_proc/rtl/iop_fifo_out_extra.r * id: $Id: iop_fifo_out_extra_defs.h,v 1.1 2005/04/24 18:31:05 starvik Exp $ * Any changes here will be lost. * * -*- buffer-read-only: t -*- */ /* Main access macros */ #ifndef REG_RD #define REG_RD( scope, inst, reg ) \ REG_READ( reg_##scope##_##reg, \ (inst) + REG_RD_ADDR_##scope##_##reg ) #endif #ifndef REG_WR #define REG_WR( scope, inst, reg, val ) \ REG_WRITE( reg_##scope##_##reg, \ (inst) + REG_WR_ADDR_##scope##_##reg, (val) ) #endif #ifndef REG_RD_VECT #define REG_RD_VECT( scope, inst, reg, index ) \ REG_READ( reg_##scope##_##reg, \ (inst) + REG_RD_ADDR_##scope##_##reg + \ (index) * STRIDE_##scope##_##reg ) #endif #ifndef REG_WR_VECT #define REG_WR_VECT( scope, inst, reg, index, val ) \ REG_WRITE( reg_##scope##_##reg, \ (inst) + REG_WR_ADDR_##scope##_##reg + \ (index) * STRIDE_##scope##_##reg, (val) ) #endif #ifndef REG_RD_INT #define REG_RD_INT( scope, inst, reg ) \ REG_READ( int, (inst) + REG_RD_ADDR_##scope##_##reg ) #endif #ifndef REG_WR_INT #define REG_WR_INT( scope, inst, reg, val ) \ REG_WRITE( int, (inst) + REG_WR_ADDR_##scope##_##reg, (val) ) #endif #ifndef REG_RD_INT_VECT #define REG_RD_INT_VECT( scope, inst, reg, index ) \ REG_READ( int, (inst) + REG_RD_ADDR_##scope##_##reg + \ (index) * STRIDE_##scope##_##reg ) #endif #ifndef REG_WR_INT_VECT #define REG_WR_INT_VECT( scope, inst, reg, index, val ) \ REG_WRITE( int, (inst) + REG_WR_ADDR_##scope##_##reg + \ (index) * STRIDE_##scope##_##reg, (val) ) #endif #ifndef REG_TYPE_CONV #define REG_TYPE_CONV( type, orgtype, val ) \ ( { union { orgtype o; type n; } r; r.o = val; r.n; } ) #endif #ifndef reg_page_size #define reg_page_size 8192 #endif #ifndef REG_ADDR #define REG_ADDR( scope, inst, reg ) \ ( (inst) + REG_RD_ADDR_##scope##_##reg ) #endif #ifndef REG_ADDR_VECT #define REG_ADDR_VECT( scope, inst, reg, index ) \ ( (inst) + REG_RD_ADDR_##scope##_##reg + \ (index) * STRIDE_##scope##_##reg ) #endif /* C-code for register scope iop_fifo_out_extra */ /* Register rs_rd_data, scope iop_fifo_out_extra, type rs */ typedef unsigned int reg_iop_fifo_out_extra_rs_rd_data; #define REG_RD_ADDR_iop_fifo_out_extra_rs_rd_data 0 /* Register r_rd_data, scope iop_fifo_out_extra, type r */ typedef unsigned int reg_iop_fifo_out_extra_r_rd_data; #define REG_RD_ADDR_iop_fifo_out_extra_r_rd_data 4 /* Register r_stat, scope iop_fifo_out_extra, type r */ typedef struct { unsigned int avail_bytes : 4; unsigned int last : 8; unsigned int dif_in_en : 1; unsigned int dif_out_en : 1; unsigned int zero_data_last : 1; unsigned int dummy1 : 17; } reg_iop_fifo_out_extra_r_stat; #define REG_RD_ADDR_iop_fifo_out_extra_r_stat 8 /* Register rw_strb_dif_out, scope iop_fifo_out_extra, type rw */ typedef unsigned int reg_iop_fifo_out_extra_rw_strb_dif_out; #define REG_RD_ADDR_iop_fifo_out_extra_rw_strb_dif_out 12 #define REG_WR_ADDR_iop_fifo_out_extra_rw_strb_dif_out 12 /* Register rw_intr_mask, scope iop_fifo_out_extra, type rw */ typedef struct { unsigned int urun : 1; unsigned int last_data : 1; unsigned int dav : 1; unsigned int free : 1; unsigned int orun : 1; unsigned int dummy1 : 27; } reg_iop_fifo_out_extra_rw_intr_mask; #define REG_RD_ADDR_iop_fifo_out_extra_rw_intr_mask 16 #define 
REG_WR_ADDR_iop_fifo_out_extra_rw_intr_mask 16 /* Register rw_ack_intr, scope iop_fifo_out_extra, type rw */ typedef struct { unsigned int urun : 1; unsigned int last_data : 1; unsigned int dav : 1; unsigned int free : 1; unsigned int orun : 1; unsigned int dummy1 : 27; } reg_iop_fifo_out_extra_rw_ack_intr; #define REG_RD_ADDR_iop_fifo_out_extra_rw_ack_intr 20 #define REG_WR_ADDR_iop_fifo_out_extra_rw_ack_intr 20 /* Register r_intr, scope iop_fifo_out_extra, type r */ typedef struct { unsigned int urun : 1; unsigned int last_data : 1; unsigned int dav : 1; unsigned int free : 1; unsigned int orun : 1; unsigned int dummy1 : 27; } reg_iop_fifo_out_extra_r_intr; #define REG_RD_ADDR_iop_fifo_out_extra_r_intr 24 /* Register r_masked_intr, scope iop_fifo_out_extra, type r */ typedef struct { unsigned int urun : 1; unsigned int last_data : 1; unsigned int dav : 1; unsigned int free : 1; unsigned int orun : 1; unsigned int dummy1 : 27; } reg_iop_fifo_out_extra_r_masked_intr; #define REG_RD_ADDR_iop_fifo_out_extra_r_masked_intr 28 /* Constants */ enum { regk_iop_fifo_out_extra_no = 0x00000000, regk_iop_fifo_out_extra_rw_intr_mask_default = 0x00000000, regk_iop_fifo_out_extra_yes = 0x00000001 }; #endif /* __iop_fifo_out_extra_defs_h */
null
null
null
null
87,751
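The REG_TYPE_CONV macro in the autogenerated header above reinterprets a raw register word as a typed bitfield struct through a union, avoiding hand-written shift-and-mask code. A runnable sketch using the r_stat layout follows; the raw value is made up, and the GCC statement expression plus bitfield ordering match the assumptions the header itself makes about its toolchain.

#include <stdio.h>

typedef struct {                         /* copies the r_stat layout above */
    unsigned int avail_bytes    : 4;
    unsigned int last           : 8;
    unsigned int dif_in_en      : 1;
    unsigned int dif_out_en     : 1;
    unsigned int zero_data_last : 1;
    unsigned int dummy1         : 17;
} reg_r_stat;

/* Same trick as the header: a GCC statement expression wrapping a union
 * type pun.  Bitfield ordering is implementation-defined, so this is
 * only portable within the toolchain the header was generated for. */
#define REG_TYPE_CONV(type, orgtype, val) \
    ( { union { orgtype o; type n; } r; r.o = (val); r.n; } )

int main(void)
{
    unsigned int raw = 0x0000100a;       /* hypothetical register readout */
    reg_r_stat s = REG_TYPE_CONV(reg_r_stat, unsigned int, raw);
    printf("avail_bytes=%u last=%u dif_in_en=%u\n",
           (unsigned)s.avail_bytes, (unsigned)s.last, (unsigned)s.dif_in_en);
    return 0;
}

On the little-endian target this header is generated for, the low four bits of the word land in avail_bytes, so the sketch reports avail_bytes=10, last=0, dif_in_en=1 for the value above.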
2,744
null
train_val
1b0d3845b454eaaac0b2064c78926ca4d739a080
265,312
qemu
0
https://github.com/bonzini/qemu
2016-10-18 11:40:27+01:00
/* * QEMU KVM support * * Copyright (C) 2006-2008 Qumranet Technologies * Copyright IBM, Corp. 2008 * * Authors: * Anthony Liguori <aliguori@us.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. * */ #include "qemu/osdep.h" #include "qapi/error.h" #include <sys/ioctl.h> #include <sys/utsname.h> #include <linux/kvm.h> #include <linux/kvm_para.h> #include "qemu-common.h" #include "cpu.h" #include "sysemu/sysemu.h" #include "sysemu/kvm_int.h" #include "kvm_i386.h" #include "hyperv.h" #include "exec/gdbstub.h" #include "qemu/host-utils.h" #include "qemu/config-file.h" #include "qemu/error-report.h" #include "hw/i386/pc.h" #include "hw/i386/apic.h" #include "hw/i386/apic_internal.h" #include "hw/i386/apic-msidef.h" #include "hw/i386/intel_iommu.h" #include "hw/i386/x86-iommu.h" #include "exec/ioport.h" #include "standard-headers/asm-x86/hyperv.h" #include "hw/pci/pci.h" #include "hw/pci/msi.h" #include "migration/migration.h" #include "exec/memattrs.h" #include "trace.h" //#define DEBUG_KVM #ifdef DEBUG_KVM #define DPRINTF(fmt, ...) \ do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0) #else #define DPRINTF(fmt, ...) \ do { } while (0) #endif #define MSR_KVM_WALL_CLOCK 0x11 #define MSR_KVM_SYSTEM_TIME 0x12 /* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus * 255 kvm_msr_entry structs */ #define MSR_BUF_SIZE 4096 #ifndef BUS_MCEERR_AR #define BUS_MCEERR_AR 4 #endif #ifndef BUS_MCEERR_AO #define BUS_MCEERR_AO 5 #endif const KVMCapabilityInfo kvm_arch_required_capabilities[] = { KVM_CAP_INFO(SET_TSS_ADDR), KVM_CAP_INFO(EXT_CPUID), KVM_CAP_INFO(MP_STATE), KVM_CAP_LAST_INFO }; static bool has_msr_star; static bool has_msr_hsave_pa; static bool has_msr_tsc_aux; static bool has_msr_tsc_adjust; static bool has_msr_tsc_deadline; static bool has_msr_feature_control; static bool has_msr_misc_enable; static bool has_msr_smbase; static bool has_msr_bndcfgs; static int lm_capable_kernel; static bool has_msr_hv_hypercall; static bool has_msr_hv_crash; static bool has_msr_hv_reset; static bool has_msr_hv_vpindex; static bool has_msr_hv_runtime; static bool has_msr_hv_synic; static bool has_msr_hv_stimer; static bool has_msr_xss; static bool has_msr_architectural_pmu; static uint32_t num_architectural_pmu_counters; static int has_xsave; static int has_xcrs; static int has_pit_state2; static bool has_msr_mcg_ext_ctl; static struct kvm_cpuid2 *cpuid_cache; int kvm_has_pit_state2(void) { return has_pit_state2; } bool kvm_has_smm(void) { return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM); } bool kvm_allows_irq0_override(void) { return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing(); } static bool kvm_x2apic_api_set_flags(uint64_t flags) { KVMState *s = KVM_STATE(current_machine->accelerator); return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags); } #define MEMORIZE(fn) \ ({ \ static typeof(fn) _result; \ static bool _memorized; \ \ if (_memorized) { \ return _result; \ } \ _memorized = true; \ _result = fn; \ }) bool kvm_enable_x2apic(void) { return MEMORIZE( kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS | KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)); } static int kvm_get_tsc(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; struct { struct kvm_msrs info; struct kvm_msr_entry entries[1]; } msr_data; int ret; if (env->tsc_valid) { return 0; } msr_data.info.nmsrs = 1; msr_data.entries[0].index = MSR_IA32_TSC; env->tsc_valid = !runstate_is_running(); ret = 
kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data); if (ret < 0) { return ret; } assert(ret == 1); env->tsc = msr_data.entries[0].data; return 0; } static inline void do_kvm_synchronize_tsc(CPUState *cpu, void *arg) { kvm_get_tsc(cpu); } void kvm_synchronize_all_tsc(void) { CPUState *cpu; if (kvm_enabled()) { CPU_FOREACH(cpu) { run_on_cpu(cpu, do_kvm_synchronize_tsc, NULL); } } } static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max) { struct kvm_cpuid2 *cpuid; int r, size; size = sizeof(*cpuid) + max * sizeof(*cpuid->entries); cpuid = g_malloc0(size); cpuid->nent = max; r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid); if (r == 0 && cpuid->nent >= max) { r = -E2BIG; } if (r < 0) { if (r == -E2BIG) { g_free(cpuid); return NULL; } else { fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n", strerror(-r)); exit(1); } } return cpuid; } /* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough * for all entries. */ static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s) { struct kvm_cpuid2 *cpuid; int max = 1; if (cpuid_cache != NULL) { return cpuid_cache; } while ((cpuid = try_get_cpuid(s, max)) == NULL) { max *= 2; } cpuid_cache = cpuid; return cpuid; } static const struct kvm_para_features { int cap; int feature; } para_features[] = { { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE }, { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY }, { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP }, { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF }, }; static int get_para_features(KVMState *s) { int i, features = 0; for (i = 0; i < ARRAY_SIZE(para_features); i++) { if (kvm_check_extension(s, para_features[i].cap)) { features |= (1 << para_features[i].feature); } } return features; } /* Returns the value for a specific register on the cpuid entry */ static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg) { uint32_t ret = 0; switch (reg) { case R_EAX: ret = entry->eax; break; case R_EBX: ret = entry->ebx; break; case R_ECX: ret = entry->ecx; break; case R_EDX: ret = entry->edx; break; } return ret; } /* Find matching entry for function/index on kvm_cpuid2 struct */ static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid, uint32_t function, uint32_t index) { int i; for (i = 0; i < cpuid->nent; ++i) { if (cpuid->entries[i].function == function && cpuid->entries[i].index == index) { return &cpuid->entries[i]; } } /* not found: */ return NULL; } uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function, uint32_t index, int reg) { struct kvm_cpuid2 *cpuid; uint32_t ret = 0; uint32_t cpuid_1_edx; bool found = false; cpuid = get_supported_cpuid(s); struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index); if (entry) { found = true; ret = cpuid_entry_get_reg(entry, reg); } /* Fixups for the data returned by KVM, below */ if (function == 1 && reg == R_EDX) { /* KVM before 2.6.30 misreports the following features */ ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA; } else if (function == 1 && reg == R_ECX) { /* We can set the hypervisor flag, even if KVM does not return it on * GET_SUPPORTED_CPUID */ ret |= CPUID_EXT_HYPERVISOR; /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER, * and the irqchip is in the kernel. 
*/ if (kvm_irqchip_in_kernel() && kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) { ret |= CPUID_EXT_TSC_DEADLINE_TIMER; } /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled * without the in-kernel irqchip */ if (!kvm_irqchip_in_kernel()) { ret &= ~CPUID_EXT_X2APIC; } } else if (function == 6 && reg == R_EAX) { ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */ } else if (function == 0x80000001 && reg == R_EDX) { /* On Intel, kvm returns cpuid according to the Intel spec, * so add missing bits according to the AMD spec: */ cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX); ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES; } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) { /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't * be enabled without the in-kernel irqchip */ if (!kvm_irqchip_in_kernel()) { ret &= ~(1U << KVM_FEATURE_PV_UNHALT); } } /* fallback for older kernels */ if ((function == KVM_CPUID_FEATURES) && !found) { ret = get_para_features(s); } return ret; } typedef struct HWPoisonPage { ram_addr_t ram_addr; QLIST_ENTRY(HWPoisonPage) list; } HWPoisonPage; static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list = QLIST_HEAD_INITIALIZER(hwpoison_page_list); static void kvm_unpoison_all(void *param) { HWPoisonPage *page, *next_page; QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) { QLIST_REMOVE(page, list); qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE); g_free(page); } } static void kvm_hwpoison_page_add(ram_addr_t ram_addr) { HWPoisonPage *page; QLIST_FOREACH(page, &hwpoison_page_list, list) { if (page->ram_addr == ram_addr) { return; } } page = g_new(HWPoisonPage, 1); page->ram_addr = ram_addr; QLIST_INSERT_HEAD(&hwpoison_page_list, page, list); } static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap, int *max_banks) { int r; r = kvm_check_extension(s, KVM_CAP_MCE); if (r > 0) { *max_banks = r; return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap); } return -ENOSYS; } static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code) { CPUState *cs = CPU(cpu); CPUX86State *env = &cpu->env; uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S; uint64_t mcg_status = MCG_STATUS_MCIP; int flags = 0; if (code == BUS_MCEERR_AR) { status |= MCI_STATUS_AR | 0x134; mcg_status |= MCG_STATUS_EIPV; } else { status |= 0xc0; mcg_status |= MCG_STATUS_RIPV; } flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0; /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the * guest kernel back into env->mcg_ext_ctl. 
*/ cpu_synchronize_state(cs); if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) { mcg_status |= MCG_STATUS_LMCE; flags = 0; } cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr, (MCM_ADDR_PHYS << 6) | 0xc, flags); } static void hardware_memory_error(void) { fprintf(stderr, "Hardware memory error!\n"); exit(1); } int kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr) { X86CPU *cpu = X86_CPU(c); CPUX86State *env = &cpu->env; ram_addr_t ram_addr; hwaddr paddr; if ((env->mcg_cap & MCG_SER_P) && addr && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) { ram_addr = qemu_ram_addr_from_host(addr); if (ram_addr == RAM_ADDR_INVALID || !kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) { fprintf(stderr, "Hardware memory error for memory used by " "QEMU itself instead of guest system!\n"); /* Hope we are lucky for AO MCE */ if (code == BUS_MCEERR_AO) { return 0; } else { hardware_memory_error(); } } kvm_hwpoison_page_add(ram_addr); kvm_mce_inject(cpu, paddr, code); } else { if (code == BUS_MCEERR_AO) { return 0; } else if (code == BUS_MCEERR_AR) { hardware_memory_error(); } else { return 1; } } return 0; } int kvm_arch_on_sigbus(int code, void *addr) { X86CPU *cpu = X86_CPU(first_cpu); if ((cpu->env.mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) { ram_addr_t ram_addr; hwaddr paddr; /* Hope we are lucky for AO MCE */ ram_addr = qemu_ram_addr_from_host(addr); if (ram_addr == RAM_ADDR_INVALID || !kvm_physical_memory_addr_from_host(first_cpu->kvm_state, addr, &paddr)) { fprintf(stderr, "Hardware memory error for memory used by " "QEMU itself instead of guest system!: %p\n", addr); return 0; } kvm_hwpoison_page_add(ram_addr); kvm_mce_inject(X86_CPU(first_cpu), paddr, code); } else { if (code == BUS_MCEERR_AO) { return 0; } else if (code == BUS_MCEERR_AR) { hardware_memory_error(); } else { return 1; } } return 0; } static int kvm_inject_mce_oldstyle(X86CPU *cpu) { CPUX86State *env = &cpu->env; if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) { unsigned int bank, bank_num = env->mcg_cap & 0xff; struct kvm_x86_mce mce; env->exception_injected = -1; /* * There must be at least one bank in use if an MCE is pending. * Find it and use its values for the event injection. 
*/ for (bank = 0; bank < bank_num; bank++) { if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) { break; } } assert(bank < bank_num); mce.bank = bank; mce.status = env->mce_banks[bank * 4 + 1]; mce.mcg_status = env->mcg_status; mce.addr = env->mce_banks[bank * 4 + 2]; mce.misc = env->mce_banks[bank * 4 + 3]; return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce); } return 0; } static void cpu_update_state(void *opaque, int running, RunState state) { CPUX86State *env = opaque; if (running) { env->tsc_valid = false; } } unsigned long kvm_arch_vcpu_id(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); return cpu->apic_id; } #ifndef KVM_CPUID_SIGNATURE_NEXT #define KVM_CPUID_SIGNATURE_NEXT 0x40000100 #endif static bool hyperv_hypercall_available(X86CPU *cpu) { return cpu->hyperv_vapic || (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY); } static bool hyperv_enabled(X86CPU *cpu) { CPUState *cs = CPU(cpu); return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 && (hyperv_hypercall_available(cpu) || cpu->hyperv_time || cpu->hyperv_relaxed_timing || cpu->hyperv_crash || cpu->hyperv_reset || cpu->hyperv_vpindex || cpu->hyperv_runtime || cpu->hyperv_synic || cpu->hyperv_stimer); } static int kvm_arch_set_tsc_khz(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; int r; if (!env->tsc_khz) { return 0; } r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ? kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) : -ENOTSUP; if (r < 0) { /* When KVM_SET_TSC_KHZ fails, it's an error only if the current * TSC frequency doesn't match the one we want. */ int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ? kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP; if (cur_freq <= 0 || cur_freq != env->tsc_khz) { error_report("warning: TSC frequency mismatch between " "VM (%" PRId64 " kHz) and host (%d kHz), " "and TSC scaling unavailable", env->tsc_khz, cur_freq); return r; } } return 0; } static int hyperv_handle_properties(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; if (cpu->hyperv_time && kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) <= 0) { cpu->hyperv_time = false; } if (cpu->hyperv_relaxed_timing) { env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE; } if (cpu->hyperv_vapic) { env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE; env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_APIC_ACCESS_AVAILABLE; } if (cpu->hyperv_time) { env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE; env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE; env->features[FEAT_HYPERV_EAX] |= 0x200; } if (cpu->hyperv_crash && has_msr_hv_crash) { env->features[FEAT_HYPERV_EDX] |= HV_X64_GUEST_CRASH_MSR_AVAILABLE; } env->features[FEAT_HYPERV_EDX] |= HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE; if (cpu->hyperv_reset && has_msr_hv_reset) { env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_RESET_AVAILABLE; } if (cpu->hyperv_vpindex && has_msr_hv_vpindex) { env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_INDEX_AVAILABLE; } if (cpu->hyperv_runtime && has_msr_hv_runtime) { env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_RUNTIME_AVAILABLE; } if (cpu->hyperv_synic) { int sint; if (!has_msr_hv_synic || kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) { fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n"); return -ENOSYS; } env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNIC_AVAILABLE; env->msr_hv_synic_version = HV_SYNIC_VERSION_1; for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) { 
env->msr_hv_synic_sint[sint] = HV_SYNIC_SINT_MASKED; } } if (cpu->hyperv_stimer) { if (!has_msr_hv_stimer) { fprintf(stderr, "Hyper-V timers aren't supported by kernel\n"); return -ENOSYS; } env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNTIMER_AVAILABLE; } return 0; } static Error *invtsc_mig_blocker; #define KVM_MAX_CPUID_ENTRIES 100 int kvm_arch_init_vcpu(CPUState *cs) { struct { struct kvm_cpuid2 cpuid; struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES]; } QEMU_PACKED cpuid_data; X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; uint32_t limit, i, j, cpuid_i; uint32_t unused; struct kvm_cpuid_entry2 *c; uint32_t signature[3]; int kvm_base = KVM_CPUID_SIGNATURE; int r; memset(&cpuid_data, 0, sizeof(cpuid_data)); cpuid_i = 0; /* Paravirtualization CPUIDs */ if (hyperv_enabled(cpu)) { c = &cpuid_data.entries[cpuid_i++]; c->function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS; if (!cpu->hyperv_vendor_id) { memcpy(signature, "Microsoft Hv", 12); } else { size_t len = strlen(cpu->hyperv_vendor_id); if (len > 12) { error_report("hv-vendor-id truncated to 12 characters"); len = 12; } memset(signature, 0, 12); memcpy(signature, cpu->hyperv_vendor_id, len); } c->eax = HYPERV_CPUID_MIN; c->ebx = signature[0]; c->ecx = signature[1]; c->edx = signature[2]; c = &cpuid_data.entries[cpuid_i++]; c->function = HYPERV_CPUID_INTERFACE; memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12); c->eax = signature[0]; c->ebx = 0; c->ecx = 0; c->edx = 0; c = &cpuid_data.entries[cpuid_i++]; c->function = HYPERV_CPUID_VERSION; c->eax = 0x00001bbc; c->ebx = 0x00060001; c = &cpuid_data.entries[cpuid_i++]; c->function = HYPERV_CPUID_FEATURES; r = hyperv_handle_properties(cs); if (r) { return r; } c->eax = env->features[FEAT_HYPERV_EAX]; c->ebx = env->features[FEAT_HYPERV_EBX]; c->edx = env->features[FEAT_HYPERV_EDX]; c = &cpuid_data.entries[cpuid_i++]; c->function = HYPERV_CPUID_ENLIGHTMENT_INFO; if (cpu->hyperv_relaxed_timing) { c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; } if (cpu->hyperv_vapic) { c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; } c->ebx = cpu->hyperv_spinlock_attempts; c = &cpuid_data.entries[cpuid_i++]; c->function = HYPERV_CPUID_IMPLEMENT_LIMITS; c->eax = 0x40; c->ebx = 0x40; kvm_base = KVM_CPUID_SIGNATURE_NEXT; has_msr_hv_hypercall = true; } if (cpu->expose_kvm) { memcpy(signature, "KVMKVMKVM\0\0\0", 12); c = &cpuid_data.entries[cpuid_i++]; c->function = KVM_CPUID_SIGNATURE | kvm_base; c->eax = KVM_CPUID_FEATURES | kvm_base; c->ebx = signature[0]; c->ecx = signature[1]; c->edx = signature[2]; c = &cpuid_data.entries[cpuid_i++]; c->function = KVM_CPUID_FEATURES | kvm_base; c->eax = env->features[FEAT_KVM]; } cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused); for (i = 0; i <= limit; i++) { if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { fprintf(stderr, "unsupported level value: 0x%x\n", limit); abort(); } c = &cpuid_data.entries[cpuid_i++]; switch (i) { case 2: { /* Keep reading function 2 till all the input is received */ int times; c->function = i; c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC | KVM_CPUID_FLAG_STATE_READ_NEXT; cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); times = c->eax & 0xff; for (j = 1; j < times; ++j) { if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { fprintf(stderr, "cpuid_data is full, no space for " "cpuid(eax:2):eax & 0xf = 0x%x\n", times); abort(); } c = &cpuid_data.entries[cpuid_i++]; c->function = i; c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC; cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); } break; } case 4: case 0xb: case 0xd: for (j = 0; ; j++) { if (i == 
0xd && j == 64) {
                    break;
                }
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }

    if (limit >= 0x0a) {
        uint32_t ver;

        cpu_x86_cpuid(env, 0x0a, 0, &ver, &unused, &unused, &unused);
        if ((ver & 0xff) > 0) {
            has_msr_architectural_pmu = true;
            num_architectural_pmu_counters = (ver & 0xff00) >> 8;

            /* Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
            if (num_architectural_pmu_counters > MAX_GP_COUNTERS) {
                num_architectural_pmu_counters = MAX_GP_COUNTERS;
            }
        }
    }

    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
                abort();
            }
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

    if (((env->cpuid_version >> 8)&0xF) >= 6
        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap, unsupported_caps;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
            error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
                         (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
            return -ENOTSUP;
        }

        unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
        if (unsupported_caps) {
            if (unsupported_caps & MCG_LMCE_P) {
                error_report("kvm: LMCE not supported");
                return -ENOTSUP;
            }
            error_report("warning: Unsupported MCG_CAP bits: 0x%" PRIx64,
                         unsupported_caps);
        }

        env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
        ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }
    }

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
    if (c) {
        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
                                  !!(c->ecx & CPUID_EXT_SMX);
    }

    if (env->mcg_cap & MCG_LMCE_P) {
        has_msr_mcg_ext_ctl = has_msr_feature_control = true;
    }

    c = cpuid_find_entry(&cpuid_data.cpuid, 0x80000007, 0);
    if (c && (c->edx & 1<<8) && invtsc_mig_blocker == NULL) {
        /* for migration */
        error_setg(&invtsc_mig_blocker,
                   "State blocked by non-migratable CPU device"
                   " (invtsc flag)");
        migrate_add_blocker(invtsc_mig_blocker);
        /* for savevm */
        vmstate_x86_cpu.unmigratable = 1;
    }
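    /* Every CPUID-related entry must be staged in cpuid_data by this point:
     * the KVM_SET_CPUID2 call below hands the whole table to KVM in one
     * shot, replacing any table previously installed for this vCPU. */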
    cpuid_data.cpuid.padding = 0;
    r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        return r;
    }

    r = kvm_arch_set_tsc_khz(cs);
    if (r < 0) {
        return r;
    }

    /* The vCPU's TSC frequency is either specified by the user, or it
     * follows the value used by KVM if the former is not present. In the
     * latter case, we query it from KVM and record it in env->tsc_khz,
     * so that the vCPU's TSC frequency can be migrated later via this field.
     */
    if (!env->tsc_khz) {
        r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
            kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
            -ENOTSUP;
        if (r > 0) {
            env->tsc_khz = r;
        }
    }

    if (has_xsave) {
        env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
    }
    cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);

    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
        has_msr_tsc_aux = false;
    }

    return 0;
}

void kvm_arch_reset_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}

void kvm_arch_do_init_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    /* APs get directly into wait-for-SIPI state.  */
    if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
        env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
    }
}

static int kvm_get_supported_msrs(KVMState *s)
{
    static int kvm_supported_msrs;
    int ret = 0;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return ret;
        }
        /* Old kernel modules had a bug and could write beyond the
         * provided memory. Allocate at least 1K to be safe.
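         * (The probe above intentionally passes nmsrs = 0: the kernel
         * answers with -E2BIG and writes the required count back into
         * msr_list.nmsrs, which is what sizes the real query below.)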
*/ kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) + msr_list.nmsrs * sizeof(msr_list.indices[0]))); kvm_msr_list->nmsrs = msr_list.nmsrs; ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list); if (ret >= 0) { int i; for (i = 0; i < kvm_msr_list->nmsrs; i++) { if (kvm_msr_list->indices[i] == MSR_STAR) { has_msr_star = true; continue; } if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) { has_msr_hsave_pa = true; continue; } if (kvm_msr_list->indices[i] == MSR_TSC_AUX) { has_msr_tsc_aux = true; continue; } if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) { has_msr_tsc_adjust = true; continue; } if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) { has_msr_tsc_deadline = true; continue; } if (kvm_msr_list->indices[i] == MSR_IA32_SMBASE) { has_msr_smbase = true; continue; } if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) { has_msr_misc_enable = true; continue; } if (kvm_msr_list->indices[i] == MSR_IA32_BNDCFGS) { has_msr_bndcfgs = true; continue; } if (kvm_msr_list->indices[i] == MSR_IA32_XSS) { has_msr_xss = true; continue; } if (kvm_msr_list->indices[i] == HV_X64_MSR_CRASH_CTL) { has_msr_hv_crash = true; continue; } if (kvm_msr_list->indices[i] == HV_X64_MSR_RESET) { has_msr_hv_reset = true; continue; } if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_INDEX) { has_msr_hv_vpindex = true; continue; } if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_RUNTIME) { has_msr_hv_runtime = true; continue; } if (kvm_msr_list->indices[i] == HV_X64_MSR_SCONTROL) { has_msr_hv_synic = true; continue; } if (kvm_msr_list->indices[i] == HV_X64_MSR_STIMER0_CONFIG) { has_msr_hv_stimer = true; continue; } } } g_free(kvm_msr_list); } return ret; } static Notifier smram_machine_done; static KVMMemoryListener smram_listener; static AddressSpace smram_address_space; static MemoryRegion smram_as_root; static MemoryRegion smram_as_mem; static void register_smram_listener(Notifier *n, void *unused) { MemoryRegion *smram = (MemoryRegion *) object_resolve_path("/machine/smram", NULL); /* Outer container... */ memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull); memory_region_set_enabled(&smram_as_root, true); /* ... with two regions inside: normal system memory with low * priority, and... */ memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram", get_system_memory(), 0, ~0ull); memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0); memory_region_set_enabled(&smram_as_mem, true); if (smram) { /* ... SMRAM with higher priority */ memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10); memory_region_set_enabled(smram, true); } address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM"); kvm_memory_listener_register(kvm_state, &smram_listener, &smram_address_space, 1); } int kvm_arch_init(MachineState *ms, KVMState *s) { uint64_t identity_base = 0xfffbc000; uint64_t shadow_mem; int ret; struct utsname utsname; #ifdef KVM_CAP_XSAVE has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE); #endif #ifdef KVM_CAP_XCRS has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS); #endif #ifdef KVM_CAP_PIT_STATE2 has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2); #endif ret = kvm_get_supported_msrs(s); if (ret < 0) { return ret; } uname(&utsname); lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0; /* * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly. * In order to use vm86 mode, an EPT identity map and a TSS are needed. 
* Since these must be part of guest physical memory, we need to allocate * them, both by setting their start addresses in the kernel and by * creating a corresponding e820 entry. We need 4 pages before the BIOS. * * Older KVM versions may not support setting the identity map base. In * that case we need to stick with the default, i.e. a 256K maximum BIOS * size. */ if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) { /* Allows up to 16M BIOSes. */ identity_base = 0xfeffc000; ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base); if (ret < 0) { return ret; } } /* Set TSS base one page after EPT identity map. */ ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000); if (ret < 0) { return ret; } /* Tell fw_cfg to notify the BIOS to reserve the range. */ ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED); if (ret < 0) { fprintf(stderr, "e820_add_entry() table is full\n"); return ret; } qemu_register_reset(kvm_unpoison_all, NULL); shadow_mem = machine_kvm_shadow_mem(ms); if (shadow_mem != -1) { shadow_mem /= 4096; ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem); if (ret < 0) { return ret; } } if (kvm_check_extension(s, KVM_CAP_X86_SMM)) { smram_machine_done.notify = register_smram_listener; qemu_add_machine_init_done_notifier(&smram_machine_done); } return 0; } static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs) { lhs->selector = rhs->selector; lhs->base = rhs->base; lhs->limit = rhs->limit; lhs->type = 3; lhs->present = 1; lhs->dpl = 3; lhs->db = 0; lhs->s = 1; lhs->l = 0; lhs->g = 0; lhs->avl = 0; lhs->unusable = 0; } static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs) { unsigned flags = rhs->flags; lhs->selector = rhs->selector; lhs->base = rhs->base; lhs->limit = rhs->limit; lhs->type = (flags >> DESC_TYPE_SHIFT) & 15; lhs->present = (flags & DESC_P_MASK) != 0; lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3; lhs->db = (flags >> DESC_B_SHIFT) & 1; lhs->s = (flags & DESC_S_MASK) != 0; lhs->l = (flags >> DESC_L_SHIFT) & 1; lhs->g = (flags & DESC_G_MASK) != 0; lhs->avl = (flags & DESC_AVL_MASK) != 0; lhs->unusable = !lhs->present; lhs->padding = 0; } static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs) { lhs->selector = rhs->selector; lhs->base = rhs->base; lhs->limit = rhs->limit; if (rhs->unusable) { lhs->flags = 0; } else { lhs->flags = (rhs->type << DESC_TYPE_SHIFT) | (rhs->present * DESC_P_MASK) | (rhs->dpl << DESC_DPL_SHIFT) | (rhs->db << DESC_B_SHIFT) | (rhs->s * DESC_S_MASK) | (rhs->l << DESC_L_SHIFT) | (rhs->g * DESC_G_MASK) | (rhs->avl * DESC_AVL_MASK); } } static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set) { if (set) { *kvm_reg = *qemu_reg; } else { *qemu_reg = *kvm_reg; } } static int kvm_getput_regs(X86CPU *cpu, int set) { CPUX86State *env = &cpu->env; struct kvm_regs regs; int ret = 0; if (!set) { ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs); if (ret < 0) { return ret; } } kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set); kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set); kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set); kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set); kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set); kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set); kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set); kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set); #ifdef TARGET_X86_64 kvm_getput_reg(&regs.r8, &env->regs[8], set); kvm_getput_reg(&regs.r9, &env->regs[9], set); kvm_getput_reg(&regs.r10, &env->regs[10], set); 
kvm_getput_reg(&regs.r11, &env->regs[11], set); kvm_getput_reg(&regs.r12, &env->regs[12], set); kvm_getput_reg(&regs.r13, &env->regs[13], set); kvm_getput_reg(&regs.r14, &env->regs[14], set); kvm_getput_reg(&regs.r15, &env->regs[15], set); #endif kvm_getput_reg(&regs.rflags, &env->eflags, set); kvm_getput_reg(&regs.rip, &env->eip, set); if (set) { ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs); } return ret; } static int kvm_put_fpu(X86CPU *cpu) { CPUX86State *env = &cpu->env; struct kvm_fpu fpu; int i; memset(&fpu, 0, sizeof fpu); fpu.fsw = env->fpus & ~(7 << 11); fpu.fsw |= (env->fpstt & 7) << 11; fpu.fcw = env->fpuc; fpu.last_opcode = env->fpop; fpu.last_ip = env->fpip; fpu.last_dp = env->fpdp; for (i = 0; i < 8; ++i) { fpu.ftwx |= (!env->fptags[i]) << i; } memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs); for (i = 0; i < CPU_NB_REGS; i++) { stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0)); stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1)); } fpu.mxcsr = env->mxcsr; return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu); } #define XSAVE_FCW_FSW 0 #define XSAVE_FTW_FOP 1 #define XSAVE_CWD_RIP 2 #define XSAVE_CWD_RDP 4 #define XSAVE_MXCSR 6 #define XSAVE_ST_SPACE 8 #define XSAVE_XMM_SPACE 40 #define XSAVE_XSTATE_BV 128 #define XSAVE_YMMH_SPACE 144 #define XSAVE_BNDREGS 240 #define XSAVE_BNDCSR 256 #define XSAVE_OPMASK 272 #define XSAVE_ZMM_Hi256 288 #define XSAVE_Hi16_ZMM 416 #define XSAVE_PKRU 672 #define XSAVE_BYTE_OFFSET(word_offset) \ ((word_offset) * sizeof(((struct kvm_xsave *)0)->region[0])) #define ASSERT_OFFSET(word_offset, field) \ QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \ offsetof(X86XSaveArea, field)) ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw); ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw); ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip); ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp); ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr); ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs); ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs); ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv); ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state); ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state); ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state); ASSERT_OFFSET(XSAVE_OPMASK, opmask_state); ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state); ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state); ASSERT_OFFSET(XSAVE_PKRU, pkru_state); static int kvm_put_xsave(X86CPU *cpu) { CPUX86State *env = &cpu->env; X86XSaveArea *xsave = env->kvm_xsave_buf; uint16_t cwd, swd, twd; int i; if (!has_xsave) { return kvm_put_fpu(cpu); } memset(xsave, 0, sizeof(struct kvm_xsave)); twd = 0; swd = env->fpus & ~(7 << 11); swd |= (env->fpstt & 7) << 11; cwd = env->fpuc; for (i = 0; i < 8; ++i) { twd |= (!env->fptags[i]) << i; } xsave->legacy.fcw = cwd; xsave->legacy.fsw = swd; xsave->legacy.ftw = twd; xsave->legacy.fpop = env->fpop; xsave->legacy.fpip = env->fpip; xsave->legacy.fpdp = env->fpdp; memcpy(&xsave->legacy.fpregs, env->fpregs, sizeof env->fpregs); xsave->legacy.mxcsr = env->mxcsr; xsave->header.xstate_bv = env->xstate_bv; memcpy(&xsave->bndreg_state.bnd_regs, env->bnd_regs, sizeof env->bnd_regs); xsave->bndcsr_state.bndcsr = env->bndcs_regs; memcpy(&xsave->opmask_state.opmask_regs, env->opmask_regs, sizeof env->opmask_regs); for (i = 0; i < CPU_NB_REGS; i++) { uint8_t *xmm = xsave->legacy.xmm_regs[i]; uint8_t *ymmh = xsave->avx_state.ymmh[i]; uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i]; stq_p(xmm, env->xmm_regs[i].ZMM_Q(0)); stq_p(xmm+8, env->xmm_regs[i].ZMM_Q(1)); stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2)); stq_p(ymmh+8, env->xmm_regs[i].ZMM_Q(3)); 
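        /* Quadwords 4..7 carry bits 511:256 of each vector register and
         * land in the ZMM_Hi256 xsave component. */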
stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4)); stq_p(zmmh+8, env->xmm_regs[i].ZMM_Q(5)); stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6)); stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7)); } #ifdef TARGET_X86_64 memcpy(&xsave->hi16_zmm_state.hi16_zmm, &env->xmm_regs[16], 16 * sizeof env->xmm_regs[16]); memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru); #endif return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave); } static int kvm_put_xcrs(X86CPU *cpu) { CPUX86State *env = &cpu->env; struct kvm_xcrs xcrs = {}; if (!has_xcrs) { return 0; } xcrs.nr_xcrs = 1; xcrs.flags = 0; xcrs.xcrs[0].xcr = 0; xcrs.xcrs[0].value = env->xcr0; return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs); } static int kvm_put_sregs(X86CPU *cpu) { CPUX86State *env = &cpu->env; struct kvm_sregs sregs; memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap)); if (env->interrupt_injected >= 0) { sregs.interrupt_bitmap[env->interrupt_injected / 64] |= (uint64_t)1 << (env->interrupt_injected % 64); } if ((env->eflags & VM_MASK)) { set_v8086_seg(&sregs.cs, &env->segs[R_CS]); set_v8086_seg(&sregs.ds, &env->segs[R_DS]); set_v8086_seg(&sregs.es, &env->segs[R_ES]); set_v8086_seg(&sregs.fs, &env->segs[R_FS]); set_v8086_seg(&sregs.gs, &env->segs[R_GS]); set_v8086_seg(&sregs.ss, &env->segs[R_SS]); } else { set_seg(&sregs.cs, &env->segs[R_CS]); set_seg(&sregs.ds, &env->segs[R_DS]); set_seg(&sregs.es, &env->segs[R_ES]); set_seg(&sregs.fs, &env->segs[R_FS]); set_seg(&sregs.gs, &env->segs[R_GS]); set_seg(&sregs.ss, &env->segs[R_SS]); } set_seg(&sregs.tr, &env->tr); set_seg(&sregs.ldt, &env->ldt); sregs.idt.limit = env->idt.limit; sregs.idt.base = env->idt.base; memset(sregs.idt.padding, 0, sizeof sregs.idt.padding); sregs.gdt.limit = env->gdt.limit; sregs.gdt.base = env->gdt.base; memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding); sregs.cr0 = env->cr[0]; sregs.cr2 = env->cr[2]; sregs.cr3 = env->cr[3]; sregs.cr4 = env->cr[4]; sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state); sregs.apic_base = cpu_get_apic_base(cpu->apic_state); sregs.efer = env->efer; return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs); } static void kvm_msr_buf_reset(X86CPU *cpu) { memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE); } static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value) { struct kvm_msrs *msrs = cpu->kvm_msr_buf; void *limit = ((void *)msrs) + MSR_BUF_SIZE; struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs]; assert((void *)(entry + 1) <= limit); entry->index = index; entry->reserved = 0; entry->data = value; msrs->nmsrs++; } static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value) { kvm_msr_buf_reset(cpu); kvm_msr_entry_add(cpu, index, value); return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); } void kvm_put_apicbase(X86CPU *cpu, uint64_t value) { int ret; ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value); assert(ret == 1); } static int kvm_put_tscdeadline_msr(X86CPU *cpu) { CPUX86State *env = &cpu->env; int ret; if (!has_msr_tsc_deadline) { return 0; } ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline); if (ret < 0) { return ret; } assert(ret == 1); return 0; } /* * Provide a separate write service for the feature control MSR in order to * kick the VCPU out of VMXON or even guest mode on reset. This has to be done * before writing any other state because forcibly leaving nested mode * invalidates the VCPU state. 
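 * (Presumably the reset value, with the VMXON enable bits cleared, is what
 * makes KVM force the vCPU out of VMX operation here.)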
*/ static int kvm_put_msr_feature_control(X86CPU *cpu) { int ret; if (!has_msr_feature_control) { return 0; } ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL, cpu->env.msr_ia32_feature_control); if (ret < 0) { return ret; } assert(ret == 1); return 0; } static int kvm_put_msrs(X86CPU *cpu, int level) { CPUX86State *env = &cpu->env; int i; int ret; kvm_msr_buf_reset(cpu); kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs); kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp); kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip); kvm_msr_entry_add(cpu, MSR_PAT, env->pat); if (has_msr_star) { kvm_msr_entry_add(cpu, MSR_STAR, env->star); } if (has_msr_hsave_pa) { kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave); } if (has_msr_tsc_aux) { kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux); } if (has_msr_tsc_adjust) { kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust); } if (has_msr_misc_enable) { kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, env->msr_ia32_misc_enable); } if (has_msr_smbase) { kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase); } if (has_msr_bndcfgs) { kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs); } if (has_msr_xss) { kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss); } #ifdef TARGET_X86_64 if (lm_capable_kernel) { kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar); kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase); kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask); kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar); } #endif /* * The following MSRs have side effects on the guest or are too heavy * for normal writeback. Limit them to reset or full state updates. */ if (level >= KVM_PUT_RESET_STATE) { kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc); kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr); kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr); if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) { kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr); } if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) { kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr); } if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr); } if (has_msr_architectural_pmu) { /* Stop the counter. */ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); /* Set the counter values. */ for (i = 0; i < MAX_FIXED_COUNTERS; i++) { kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, env->msr_fixed_counters[i]); } for (i = 0; i < num_architectural_pmu_counters; i++) { kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, env->msr_gp_counters[i]); kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, env->msr_gp_evtsel[i]); } kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, env->msr_global_status); kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, env->msr_global_ovf_ctrl); /* Now start the PMU. 
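             * Restoring FIXED_CTR_CTRL and GLOBAL_CTRL last means the
             * counters only resume once all of the values staged above
             * are in place.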
*/ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, env->msr_fixed_ctr_ctrl); kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, env->msr_global_ctrl); } if (has_msr_hv_hypercall) { kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, env->msr_hv_guest_os_id); kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, env->msr_hv_hypercall); } if (cpu->hyperv_vapic) { kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, env->msr_hv_vapic); } if (cpu->hyperv_time) { kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc); } if (has_msr_hv_crash) { int j; for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, env->msr_hv_crash_params[j]); kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_X64_MSR_CRASH_CTL_NOTIFY); } if (has_msr_hv_runtime) { kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime); } if (cpu->hyperv_synic) { int j; kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, env->msr_hv_synic_control); kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, env->msr_hv_synic_version); kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, env->msr_hv_synic_evt_page); kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, env->msr_hv_synic_msg_page); for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) { kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j, env->msr_hv_synic_sint[j]); } } if (has_msr_hv_stimer) { int j; for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) { kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2, env->msr_hv_stimer_config[j]); } for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) { kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2, env->msr_hv_stimer_count[j]); } } if (env->features[FEAT_1_EDX] & CPUID_MTRR) { uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits); kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype); kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]); kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]); kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]); for (i = 0; i < MSR_MTRRcap_VCNT; i++) { /* The CPU GPs if we write to a bit above the physical limit of * the host CPU (and KVM emulates that) */ uint64_t mask = env->mtrr_var[i].mask; mask &= phys_mask; kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), env->mtrr_var[i].base); kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask); } } /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see * kvm_put_msr_feature_control. 
*/ } if (env->mcg_cap) { int i; kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status); kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl); if (has_msr_mcg_ext_ctl) { kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl); } for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) { kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]); } } ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); if (ret < 0) { return ret; } assert(ret == cpu->kvm_msr_buf->nmsrs); return 0; } static int kvm_get_fpu(X86CPU *cpu) { CPUX86State *env = &cpu->env; struct kvm_fpu fpu; int i, ret; ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu); if (ret < 0) { return ret; } env->fpstt = (fpu.fsw >> 11) & 7; env->fpus = fpu.fsw; env->fpuc = fpu.fcw; env->fpop = fpu.last_opcode; env->fpip = fpu.last_ip; env->fpdp = fpu.last_dp; for (i = 0; i < 8; ++i) { env->fptags[i] = !((fpu.ftwx >> i) & 1); } memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs); for (i = 0; i < CPU_NB_REGS; i++) { env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]); env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]); } env->mxcsr = fpu.mxcsr; return 0; } static int kvm_get_xsave(X86CPU *cpu) { CPUX86State *env = &cpu->env; X86XSaveArea *xsave = env->kvm_xsave_buf; int ret, i; uint16_t cwd, swd, twd; if (!has_xsave) { return kvm_get_fpu(cpu); } ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave); if (ret < 0) { return ret; } cwd = xsave->legacy.fcw; swd = xsave->legacy.fsw; twd = xsave->legacy.ftw; env->fpop = xsave->legacy.fpop; env->fpstt = (swd >> 11) & 7; env->fpus = swd; env->fpuc = cwd; for (i = 0; i < 8; ++i) { env->fptags[i] = !((twd >> i) & 1); } env->fpip = xsave->legacy.fpip; env->fpdp = xsave->legacy.fpdp; env->mxcsr = xsave->legacy.mxcsr; memcpy(env->fpregs, &xsave->legacy.fpregs, sizeof env->fpregs); env->xstate_bv = xsave->header.xstate_bv; memcpy(env->bnd_regs, &xsave->bndreg_state.bnd_regs, sizeof env->bnd_regs); env->bndcs_regs = xsave->bndcsr_state.bndcsr; memcpy(env->opmask_regs, &xsave->opmask_state.opmask_regs, sizeof env->opmask_regs); for (i = 0; i < CPU_NB_REGS; i++) { uint8_t *xmm = xsave->legacy.xmm_regs[i]; uint8_t *ymmh = xsave->avx_state.ymmh[i]; uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i]; env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm); env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8); env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh); env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8); env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh); env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8); env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16); env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24); } #ifdef TARGET_X86_64 memcpy(&env->xmm_regs[16], &xsave->hi16_zmm_state.hi16_zmm, 16 * sizeof env->xmm_regs[16]); memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru); #endif return 0; } static int kvm_get_xcrs(X86CPU *cpu) { CPUX86State *env = &cpu->env; int i, ret; struct kvm_xcrs xcrs; if (!has_xcrs) { return 0; } ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs); if (ret < 0) { return ret; } for (i = 0; i < xcrs.nr_xcrs; i++) { /* Only support xcr0 now */ if (xcrs.xcrs[i].xcr == 0) { env->xcr0 = xcrs.xcrs[i].value; break; } } return 0; } static int kvm_get_sregs(X86CPU *cpu) { CPUX86State *env = &cpu->env; struct kvm_sregs sregs; uint32_t hflags; int bit, i, ret; ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs); if (ret < 0) { return ret; } /* There can only be one pending IRQ set in the bitmap at a time, so try to find it and save its number instead (-1 for none). 
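     * (kvm_put_sregs performs the inverse mapping: pending vector n is
     * encoded as bit n % 64 of interrupt_bitmap[n / 64].)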
*/ env->interrupt_injected = -1; for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) { if (sregs.interrupt_bitmap[i]) { bit = ctz64(sregs.interrupt_bitmap[i]); env->interrupt_injected = i * 64 + bit; break; } } get_seg(&env->segs[R_CS], &sregs.cs); get_seg(&env->segs[R_DS], &sregs.ds); get_seg(&env->segs[R_ES], &sregs.es); get_seg(&env->segs[R_FS], &sregs.fs); get_seg(&env->segs[R_GS], &sregs.gs); get_seg(&env->segs[R_SS], &sregs.ss); get_seg(&env->tr, &sregs.tr); get_seg(&env->ldt, &sregs.ldt); env->idt.limit = sregs.idt.limit; env->idt.base = sregs.idt.base; env->gdt.limit = sregs.gdt.limit; env->gdt.base = sregs.gdt.base; env->cr[0] = sregs.cr0; env->cr[2] = sregs.cr2; env->cr[3] = sregs.cr3; env->cr[4] = sregs.cr4; env->efer = sregs.efer; /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */ #define HFLAG_COPY_MASK \ ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \ HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \ HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \ HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK) hflags = env->hflags & HFLAG_COPY_MASK; hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT); hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK); hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK)); if (env->cr[4] & CR4_OSFXSR_MASK) { hflags |= HF_OSFXSR_MASK; } if (env->efer & MSR_EFER_LMA) { hflags |= HF_LMA_MASK; } if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; } else { hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >> (DESC_B_SHIFT - HF_CS32_SHIFT); hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> (DESC_B_SHIFT - HF_SS32_SHIFT); if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) || !(hflags & HF_CS32_MASK)) { hflags |= HF_ADDSEG_MASK; } else { hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base | env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT; } } env->hflags = hflags; return 0; } static int kvm_get_msrs(X86CPU *cpu) { CPUX86State *env = &cpu->env; struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries; int ret, i; uint64_t mtrr_top_bits; kvm_msr_buf_reset(cpu); kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0); kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0); kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0); kvm_msr_entry_add(cpu, MSR_PAT, 0); if (has_msr_star) { kvm_msr_entry_add(cpu, MSR_STAR, 0); } if (has_msr_hsave_pa) { kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0); } if (has_msr_tsc_aux) { kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0); } if (has_msr_tsc_adjust) { kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0); } if (has_msr_tsc_deadline) { kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0); } if (has_msr_misc_enable) { kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0); } if (has_msr_smbase) { kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0); } if (has_msr_feature_control) { kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0); } if (has_msr_bndcfgs) { kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0); } if (has_msr_xss) { kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0); } if (!env->tsc_valid) { kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0); env->tsc_valid = !runstate_is_running(); } #ifdef TARGET_X86_64 if (lm_capable_kernel) { kvm_msr_entry_add(cpu, MSR_CSTAR, 0); kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0); kvm_msr_entry_add(cpu, MSR_FMASK, 0); kvm_msr_entry_add(cpu, MSR_LSTAR, 0); } #endif kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0); 
    kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
        kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
        kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
        kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
    }
    if (has_msr_architectural_pmu) {
        kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
        for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
        }
        for (i = 0; i < num_architectural_pmu_counters; i++) {
            kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
            kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
        }
    }

    if (env->mcg_cap) {
        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
        if (has_msr_mcg_ext_ctl) {
            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
        }
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
        }
    }

    if (has_msr_hv_hypercall) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
    }
    if (cpu->hyperv_vapic) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
    }
    if (cpu->hyperv_time) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
    }
    if (has_msr_hv_crash) {
        int j;

        for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
        }
    }
    if (has_msr_hv_runtime) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
    }
    if (cpu->hyperv_synic) {
        uint32_t msr;

        kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
        for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (has_msr_hv_stimer) {
        uint32_t msr;

        for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
             msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
        kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
            kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
            kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
        }
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    /*
     * MTRR masks: Each mask consists of 5 parts
     * a  10..0:  must be zero
     * b  11:     valid bit
     * c  n-1..12: actual mask bits
     * d  51..n:  reserved, must be zero
     * e  63..52: reserved, must be zero
     *
     * 'n' is the number of physical bits supported by the CPU and is
     * apparently always <= 52.  We know our 'n' but don't know what
     * the destination's 'n' is; it might be smaller, in which case
     * it masks (c) on loading. It might be larger, in which case
     * we fill 'd' so that d..c is consistent irrespective of the 'n'
     * we're migrating to.
     */
    if (cpu->fill_mtrr_mask) {
        QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
        assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
        mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits,
                                        52 - cpu->phys_bits);
    } else {
        mtrr_top_bits = 0;
    }

    for (i = 0; i < ret; i++) {
        uint32_t index = msrs[i].index;
        switch (index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_TSC_AUX:
            env->tsc_aux = msrs[i].data;
            break;
        case MSR_TSC_ADJUST:
            env->tsc_adjust = msrs[i].data;
            break;
        case MSR_IA32_TSCDEADLINE:
            env->tsc_deadline = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
        case MSR_MCG_EXT_CTL:
            env->mcg_ext_ctl = msrs[i].data;
            break;
        case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
        case MSR_IA32_SMBASE:
            env->smbase = msrs[i].data;
            break;
        case MSR_IA32_FEATURE_CONTROL:
            env->msr_ia32_feature_control = msrs[i].data;
            break;
        case MSR_IA32_BNDCFGS:
            env->msr_bndcfgs = msrs[i].data;
            break;
        case MSR_IA32_XSS:
            env->xss = msrs[i].data;
            break;
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        case MSR_KVM_PV_EOI_EN:
            env->pv_eoi_en_msr = msrs[i].data;
            break;
        case MSR_KVM_STEAL_TIME:
            env->steal_time_msr = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
            env->msr_fixed_ctr_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_CTRL:
            env->msr_global_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
            env->msr_global_status = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
            env->msr_global_ovf_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR0 ...
        MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
            break;
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
            break;
        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
            break;
        case HV_X64_MSR_HYPERCALL:
            env->msr_hv_hypercall = msrs[i].data;
            break;
        case HV_X64_MSR_GUEST_OS_ID:
            env->msr_hv_guest_os_id = msrs[i].data;
            break;
        case HV_X64_MSR_APIC_ASSIST_PAGE:
            env->msr_hv_vapic = msrs[i].data;
            break;
        case HV_X64_MSR_REFERENCE_TSC:
            env->msr_hv_tsc = msrs[i].data;
            break;
        case HV_X64_MSR_CRASH_P0 ...
HV_X64_MSR_CRASH_P4: env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data; break; case HV_X64_MSR_VP_RUNTIME: env->msr_hv_runtime = msrs[i].data; break; case HV_X64_MSR_SCONTROL: env->msr_hv_synic_control = msrs[i].data; break; case HV_X64_MSR_SVERSION: env->msr_hv_synic_version = msrs[i].data; break; case HV_X64_MSR_SIEFP: env->msr_hv_synic_evt_page = msrs[i].data; break; case HV_X64_MSR_SIMP: env->msr_hv_synic_msg_page = msrs[i].data; break; case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data; break; case HV_X64_MSR_STIMER0_CONFIG: case HV_X64_MSR_STIMER1_CONFIG: case HV_X64_MSR_STIMER2_CONFIG: case HV_X64_MSR_STIMER3_CONFIG: env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] = msrs[i].data; break; case HV_X64_MSR_STIMER0_COUNT: case HV_X64_MSR_STIMER1_COUNT: case HV_X64_MSR_STIMER2_COUNT: case HV_X64_MSR_STIMER3_COUNT: env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] = msrs[i].data; break; case MSR_MTRRdefType: env->mtrr_deftype = msrs[i].data; break; case MSR_MTRRfix64K_00000: env->mtrr_fixed[0] = msrs[i].data; break; case MSR_MTRRfix16K_80000: env->mtrr_fixed[1] = msrs[i].data; break; case MSR_MTRRfix16K_A0000: env->mtrr_fixed[2] = msrs[i].data; break; case MSR_MTRRfix4K_C0000: env->mtrr_fixed[3] = msrs[i].data; break; case MSR_MTRRfix4K_C8000: env->mtrr_fixed[4] = msrs[i].data; break; case MSR_MTRRfix4K_D0000: env->mtrr_fixed[5] = msrs[i].data; break; case MSR_MTRRfix4K_D8000: env->mtrr_fixed[6] = msrs[i].data; break; case MSR_MTRRfix4K_E0000: env->mtrr_fixed[7] = msrs[i].data; break; case MSR_MTRRfix4K_E8000: env->mtrr_fixed[8] = msrs[i].data; break; case MSR_MTRRfix4K_F0000: env->mtrr_fixed[9] = msrs[i].data; break; case MSR_MTRRfix4K_F8000: env->mtrr_fixed[10] = msrs[i].data; break; case MSR_MTRRphysBase(0) ... 
MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1): if (index & 1) { env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data | mtrr_top_bits; } else { env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data; } break; } } return 0; } static int kvm_put_mp_state(X86CPU *cpu) { struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state }; return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); } static int kvm_get_mp_state(X86CPU *cpu) { CPUState *cs = CPU(cpu); CPUX86State *env = &cpu->env; struct kvm_mp_state mp_state; int ret; ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state); if (ret < 0) { return ret; } env->mp_state = mp_state.mp_state; if (kvm_irqchip_in_kernel()) { cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED); } return 0; } static int kvm_get_apic(X86CPU *cpu) { DeviceState *apic = cpu->apic_state; struct kvm_lapic_state kapic; int ret; if (apic && kvm_irqchip_in_kernel()) { ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic); if (ret < 0) { return ret; } kvm_get_apic_state(apic, &kapic); } return 0; } static int kvm_put_vcpu_events(X86CPU *cpu, int level) { CPUState *cs = CPU(cpu); CPUX86State *env = &cpu->env; struct kvm_vcpu_events events = {}; if (!kvm_has_vcpu_events()) { return 0; } events.exception.injected = (env->exception_injected >= 0); events.exception.nr = env->exception_injected; events.exception.has_error_code = env->has_error_code; events.exception.error_code = env->error_code; events.exception.pad = 0; events.interrupt.injected = (env->interrupt_injected >= 0); events.interrupt.nr = env->interrupt_injected; events.interrupt.soft = env->soft_interrupt; events.nmi.injected = env->nmi_injected; events.nmi.pending = env->nmi_pending; events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK); events.nmi.pad = 0; events.sipi_vector = env->sipi_vector; events.flags = 0; if (has_msr_smbase) { events.smi.smm = !!(env->hflags & HF_SMM_MASK); events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK); if (kvm_irqchip_in_kernel()) { /* As soon as these are moved to the kernel, remove them * from cs->interrupt_request. */ events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI; events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT; cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI); } else { /* Keep these in cs->interrupt_request. */ events.smi.pending = 0; events.smi.latched_init = 0; } events.flags |= KVM_VCPUEVENT_VALID_SMM; } if (level >= KVM_PUT_RESET_STATE) { events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR; } return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events); } static int kvm_get_vcpu_events(X86CPU *cpu) { CPUX86State *env = &cpu->env; struct kvm_vcpu_events events; int ret; if (!kvm_has_vcpu_events()) { return 0; } memset(&events, 0, sizeof(events)); ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events); if (ret < 0) { return ret; } env->exception_injected = events.exception.injected ? events.exception.nr : -1; env->has_error_code = events.exception.has_error_code; env->error_code = events.exception.error_code; env->interrupt_injected = events.interrupt.injected ? 
events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;
    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
        if (events.smi.smm) {
            env->hflags |= HF_SMM_MASK;
        } else {
            env->hflags &= ~HF_SMM_MASK;
        }
        if (events.smi.pending) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        }
        if (events.smi.smm_inside_nmi) {
            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
        } else {
            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
        }
        if (events.smi.latched_init) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        }
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}

static int kvm_guest_debug_workarounds(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have
     * to reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
        ret = kvm_update_guest_debug(cs, reinject_trap);
    }
    return ret;
}

static int kvm_put_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}

static int kvm_get_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}

int kvm_arch_put_registers(CPUState *cpu, int level)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (level == KVM_PUT_FULL_STATE) {
        /* We don't check for kvm_arch_set_tsc_khz() errors here,
         * because TSC frequency mismatch shouldn't abort migration,
         * unless the user explicitly asked for a stricter TSC
         * setting (e.g. using an explicit "tsc-freq" option).
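         * (On failure, kvm_arch_set_tsc_khz() has already printed a
         * detailed mismatch warning, so the return value is deliberately
         * ignored here.)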
*/ kvm_arch_set_tsc_khz(cpu); } ret = kvm_getput_regs(x86_cpu, 1); if (ret < 0) { return ret; } ret = kvm_put_xsave(x86_cpu); if (ret < 0) { return ret; } ret = kvm_put_xcrs(x86_cpu); if (ret < 0) { return ret; } ret = kvm_put_sregs(x86_cpu); if (ret < 0) { return ret; } /* must be before kvm_put_msrs */ ret = kvm_inject_mce_oldstyle(x86_cpu); if (ret < 0) { return ret; } ret = kvm_put_msrs(x86_cpu, level); if (ret < 0) { return ret; } if (level >= KVM_PUT_RESET_STATE) { ret = kvm_put_mp_state(x86_cpu); if (ret < 0) { return ret; } } ret = kvm_put_tscdeadline_msr(x86_cpu); if (ret < 0) { return ret; } ret = kvm_put_vcpu_events(x86_cpu, level); if (ret < 0) { return ret; } ret = kvm_put_debugregs(x86_cpu); if (ret < 0) { return ret; } /* must be last */ ret = kvm_guest_debug_workarounds(x86_cpu); if (ret < 0) { return ret; } return 0; } int kvm_arch_get_registers(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); int ret; assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs)); ret = kvm_getput_regs(cpu, 0); if (ret < 0) { goto out; } ret = kvm_get_xsave(cpu); if (ret < 0) { goto out; } ret = kvm_get_xcrs(cpu); if (ret < 0) { goto out; } ret = kvm_get_sregs(cpu); if (ret < 0) { goto out; } ret = kvm_get_msrs(cpu); if (ret < 0) { goto out; } ret = kvm_get_mp_state(cpu); if (ret < 0) { goto out; } ret = kvm_get_apic(cpu); if (ret < 0) { goto out; } ret = kvm_get_vcpu_events(cpu); if (ret < 0) { goto out; } ret = kvm_get_debugregs(cpu); if (ret < 0) { goto out; } ret = 0; out: cpu_sync_bndcs_hflags(&cpu->env); return ret; } void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) { X86CPU *x86_cpu = X86_CPU(cpu); CPUX86State *env = &x86_cpu->env; int ret; /* Inject NMI */ if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) { if (cpu->interrupt_request & CPU_INTERRUPT_NMI) { qemu_mutex_lock_iothread(); cpu->interrupt_request &= ~CPU_INTERRUPT_NMI; qemu_mutex_unlock_iothread(); DPRINTF("injected NMI\n"); ret = kvm_vcpu_ioctl(cpu, KVM_NMI); if (ret < 0) { fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n", strerror(-ret)); } } if (cpu->interrupt_request & CPU_INTERRUPT_SMI) { qemu_mutex_lock_iothread(); cpu->interrupt_request &= ~CPU_INTERRUPT_SMI; qemu_mutex_unlock_iothread(); DPRINTF("injected SMI\n"); ret = kvm_vcpu_ioctl(cpu, KVM_SMI); if (ret < 0) { fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n", strerror(-ret)); } } } if (!kvm_pic_in_kernel()) { qemu_mutex_lock_iothread(); } /* Force the VCPU out of its inner loop to process any INIT requests * or (for userspace APIC, but it is cheap to combine the checks here) * pending TPR access reports. */ if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) { if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) && !(env->hflags & HF_SMM_MASK)) { cpu->exit_request = 1; } if (cpu->interrupt_request & CPU_INTERRUPT_TPR) { cpu->exit_request = 1; } } if (!kvm_pic_in_kernel()) { /* Try to inject an interrupt if the guest can accept it */ if (run->ready_for_interrupt_injection && (cpu->interrupt_request & CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK)) { int irq; cpu->interrupt_request &= ~CPU_INTERRUPT_HARD; irq = cpu_get_pic_interrupt(env); if (irq >= 0) { struct kvm_interrupt intr; intr.irq = irq; DPRINTF("injected interrupt %d\n", irq); ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr); if (ret < 0) { fprintf(stderr, "KVM: injection failed, interrupt lost (%s)\n", strerror(-ret)); } } } /* If we have an interrupt but the guest is not ready to receive an * interrupt, request an interrupt window exit. 
This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);

        qemu_mutex_unlock_iothread();
    }
}

MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (run->flags & KVM_RUN_X86_SMM) {
        env->hflags |= HF_SMM_MASK;
    } else {
        env->hflags &= ~HF_SMM_MASK;
    }
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }

    /* We need to protect the apic state against concurrent accesses from
     * different threads in case the userspace irqchip is used. */
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_lock_iothread();
    }
    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_unlock_iothread();
    }
    return cpu_get_mem_attrs(env);
}

int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(cs);

        if (env->exception_injected == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request();
            cs->exit_request = 1;
            return 0;
        }
        env->exception_injected = EXCP12_MCHK;
        env->has_error_code = 0;

        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return cs->halted;
}

static int kvm_handle_halt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}

static int kvm_handle_tpr_access(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ?
TPR_ACCESS_WRITE : TPR_ACCESS_READ); return 1; } int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) { static const uint8_t int3 = 0xcc; if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) || cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) { return -EINVAL; } return 0; } int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) { uint8_t int3; if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc || cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) { return -EINVAL; } return 0; } static struct { target_ulong addr; int len; int type; } hw_breakpoint[4]; static int nb_hw_breakpoint; static int find_hw_breakpoint(target_ulong addr, int len, int type) { int n; for (n = 0; n < nb_hw_breakpoint; n++) { if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type && (hw_breakpoint[n].len == len || len == -1)) { return n; } } return -1; } int kvm_arch_insert_hw_breakpoint(target_ulong addr, target_ulong len, int type) { switch (type) { case GDB_BREAKPOINT_HW: len = 1; break; case GDB_WATCHPOINT_WRITE: case GDB_WATCHPOINT_ACCESS: switch (len) { case 1: break; case 2: case 4: case 8: if (addr & (len - 1)) { return -EINVAL; } break; default: return -EINVAL; } break; default: return -ENOSYS; } if (nb_hw_breakpoint == 4) { return -ENOBUFS; } if (find_hw_breakpoint(addr, len, type) >= 0) { return -EEXIST; } hw_breakpoint[nb_hw_breakpoint].addr = addr; hw_breakpoint[nb_hw_breakpoint].len = len; hw_breakpoint[nb_hw_breakpoint].type = type; nb_hw_breakpoint++; return 0; } int kvm_arch_remove_hw_breakpoint(target_ulong addr, target_ulong len, int type) { int n; n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type); if (n < 0) { return -ENOENT; } nb_hw_breakpoint--; hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint]; return 0; } void kvm_arch_remove_all_hw_breakpoints(void) { nb_hw_breakpoint = 0; } static CPUWatchpoint hw_watchpoint; static int kvm_handle_debug(X86CPU *cpu, struct kvm_debug_exit_arch *arch_info) { CPUState *cs = CPU(cpu); CPUX86State *env = &cpu->env; int ret = 0; int n; if (arch_info->exception == 1) { if (arch_info->dr6 & (1 << 14)) { if (cs->singlestep_enabled) { ret = EXCP_DEBUG; } } else { for (n = 0; n < 4; n++) { if (arch_info->dr6 & (1 << n)) { switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) { case 0x0: ret = EXCP_DEBUG; break; case 0x1: ret = EXCP_DEBUG; cs->watchpoint_hit = &hw_watchpoint; hw_watchpoint.vaddr = hw_breakpoint[n].addr; hw_watchpoint.flags = BP_MEM_WRITE; break; case 0x3: ret = EXCP_DEBUG; cs->watchpoint_hit = &hw_watchpoint; hw_watchpoint.vaddr = hw_breakpoint[n].addr; hw_watchpoint.flags = BP_MEM_ACCESS; break; } } } } } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) { ret = EXCP_DEBUG; } if (ret == 0) { cpu_synchronize_state(cs); assert(env->exception_injected == -1); /* pass to guest */ env->exception_injected = arch_info->exception; env->has_error_code = 0; } return ret; } void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg) { const uint8_t type_code[] = { [GDB_BREAKPOINT_HW] = 0x0, [GDB_WATCHPOINT_WRITE] = 0x1, [GDB_WATCHPOINT_ACCESS] = 0x3 }; const uint8_t len_code[] = { [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2 }; int n; if (kvm_sw_breakpoints_active(cpu)) { dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; } if (nb_hw_breakpoint > 0) { dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; dbg->arch.debugreg[7] = 0x0600; for (n = 0; n < nb_hw_breakpoint; n++) { dbg->arch.debugreg[n] = 
hw_breakpoint[n].addr; dbg->arch.debugreg[7] |= (2 << (n * 2)) | (type_code[hw_breakpoint[n].type] << (16 + n*4)) | ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4)); } } } static bool host_supports_vmx(void) { uint32_t ecx, unused; host_cpuid(1, 0, &unused, &unused, &ecx, &unused); return ecx & CPUID_EXT_VMX; } #define VMX_INVALID_GUEST_STATE 0x80000021 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) { X86CPU *cpu = X86_CPU(cs); uint64_t code; int ret; switch (run->exit_reason) { case KVM_EXIT_HLT: DPRINTF("handle_hlt\n"); qemu_mutex_lock_iothread(); ret = kvm_handle_halt(cpu); qemu_mutex_unlock_iothread(); break; case KVM_EXIT_SET_TPR: ret = 0; break; case KVM_EXIT_TPR_ACCESS: qemu_mutex_lock_iothread(); ret = kvm_handle_tpr_access(cpu); qemu_mutex_unlock_iothread(); break; case KVM_EXIT_FAIL_ENTRY: code = run->fail_entry.hardware_entry_failure_reason; fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n", code); if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) { fprintf(stderr, "\nIf you're running a guest on an Intel machine without " "unrestricted mode\n" "support, the failure is most likely due to the guest " "entering an invalid\n" "state for Intel VT. For example, the guest may be running " "in big real mode\n" "which is not supported on less recent Intel processors." "\n\n"); } ret = -1; break; case KVM_EXIT_EXCEPTION: fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n", run->ex.exception, run->ex.error_code); ret = -1; break; case KVM_EXIT_DEBUG: DPRINTF("kvm_exit_debug\n"); qemu_mutex_lock_iothread(); ret = kvm_handle_debug(cpu, &run->debug.arch); qemu_mutex_unlock_iothread(); break; case KVM_EXIT_HYPERV: ret = kvm_hv_handle_exit(cpu, &run->hyperv); break; case KVM_EXIT_IOAPIC_EOI: ioapic_eoi_broadcast(run->eoi.vector); ret = 0; break; default: fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason); ret = -1; break; } return ret; } bool kvm_arch_stop_on_emulation_error(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; kvm_cpu_synchronize_state(cs); return !(env->cr[0] & CR0_PE_MASK) || ((env->segs[R_CS].selector & 3) != 3); } void kvm_arch_init_irq_routing(KVMState *s) { if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) { /* If kernel can't do irq routing, interrupt source * override 0->2 cannot be set up as required by HPET. * So we have to disable it. */ no_hpet = 1; } /* We know at this point that we're using the in-kernel * irqchip, so we can use irqfds, and on x86 we know * we can use msi via irqfd and GSI routing. */ kvm_msi_via_irqfd_allowed = true; kvm_gsi_routing_allowed = true; if (kvm_irqchip_is_split()) { int i; /* If the ioapic is in QEMU and the lapics are in KVM, reserve MSI routes for signaling interrupts to the local apics. */ for (i = 0; i < IOAPIC_NUM_PINS; i++) { if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) { error_report("Could not enable split IRQ mode."); exit(1); } } } } int kvm_arch_irqchip_create(MachineState *ms, KVMState *s) { int ret; if (machine_kernel_irqchip_split(ms)) { ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24); if (ret) { error_report("Could not enable split irqchip mode: %s", strerror(-ret)); exit(1); } else { DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n"); kvm_split_irqchip = true; return 1; } } else { return 0; } } /* Classic KVM device assignment interface. Will remain x86 only.
*/ int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr, uint32_t flags, uint32_t *dev_id) { struct kvm_assigned_pci_dev dev_data = { .segnr = dev_addr->domain, .busnr = dev_addr->bus, .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function), .flags = flags, }; int ret; dev_data.assigned_dev_id = (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn; ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data); if (ret < 0) { return ret; } *dev_id = dev_data.assigned_dev_id; return 0; } int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id) { struct kvm_assigned_pci_dev dev_data = { .assigned_dev_id = dev_id, }; return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data); } static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id, uint32_t irq_type, uint32_t guest_irq) { struct kvm_assigned_irq assigned_irq = { .assigned_dev_id = dev_id, .guest_irq = guest_irq, .flags = irq_type, }; if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) { return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq); } else { return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq); } } int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi, uint32_t guest_irq) { uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX | (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX); return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq); } int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked) { struct kvm_assigned_pci_dev dev_data = { .assigned_dev_id = dev_id, .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0, }; return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data); } static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id, uint32_t type) { struct kvm_assigned_irq assigned_irq = { .assigned_dev_id = dev_id, .flags = type, }; return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq); } int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi) { return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX | (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX)); } int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq) { return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI | KVM_DEV_IRQ_GUEST_MSI, virq); } int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id) { return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI | KVM_DEV_IRQ_HOST_MSI); } bool kvm_device_msix_supported(KVMState *s) { /* The kernel lacks a corresponding KVM_CAP, so we probe by calling * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. 
*/ return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT; } int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id, uint32_t nr_vectors) { struct kvm_assigned_msix_nr msix_nr = { .assigned_dev_id = dev_id, .entry_nr = nr_vectors, }; return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr); } int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector, int virq) { struct kvm_assigned_msix_entry msix_entry = { .assigned_dev_id = dev_id, .gsi = virq, .entry = vector, }; return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry); } int kvm_device_msix_assign(KVMState *s, uint32_t dev_id) { return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX | KVM_DEV_IRQ_GUEST_MSIX, 0); } int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id) { return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX | KVM_DEV_IRQ_HOST_MSIX); } int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, uint64_t address, uint32_t data, PCIDevice *dev) { X86IOMMUState *iommu = x86_iommu_get_default(); if (iommu) { int ret; MSIMessage src, dst; X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu); src.address = route->u.msi.address_hi; src.address <<= VTD_MSI_ADDR_HI_SHIFT; src.address |= route->u.msi.address_lo; src.data = route->u.msi.data; ret = class->int_remap(iommu, &src, &dst, dev ? \ pci_requester_id(dev) : \ X86_IOMMU_SID_INVALID); if (ret) { trace_kvm_x86_fixup_msi_error(route->gsi); return 1; } route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT; route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK; route->u.msi.data = dst.data; } return 0; } typedef struct MSIRouteEntry MSIRouteEntry; struct MSIRouteEntry { PCIDevice *dev; /* Device pointer */ int vector; /* MSI/MSIX vector index */ int virq; /* Virtual IRQ index */ QLIST_ENTRY(MSIRouteEntry) list; }; /* List of used GSI routes */ static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \ QLIST_HEAD_INITIALIZER(msi_route_list); static void kvm_update_msi_routes_all(void *private, bool global, uint32_t index, uint32_t mask) { int cnt = 0; MSIRouteEntry *entry; MSIMessage msg; /* TODO: explicit route update */ QLIST_FOREACH(entry, &msi_route_list, list) { cnt++; msg = pci_get_msi_message(entry->dev, entry->vector); kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, entry->dev); } kvm_irqchip_commit_routes(kvm_state); trace_kvm_x86_update_msi_routes(cnt); } int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, int vector, PCIDevice *dev) { static bool notify_list_inited = false; MSIRouteEntry *entry; if (!dev) { /* These are (possibly) IOAPIC routes only used for split * kernel irqchip mode, while what we are housekeeping are * PCI devices only. */ return 0; } entry = g_new0(MSIRouteEntry, 1); entry->dev = dev; entry->vector = vector; entry->virq = route->gsi; QLIST_INSERT_HEAD(&msi_route_list, entry, list); trace_kvm_x86_add_msi_route(route->gsi); if (!notify_list_inited) { /* For the first time we do add route, add ourselves into * IOMMU's IEC notify list if needed. */ X86IOMMUState *iommu = x86_iommu_get_default(); if (iommu) { x86_iommu_iec_register_notifier(iommu, kvm_update_msi_routes_all, NULL); } notify_list_inited = true; } return 0; } int kvm_arch_release_virq_post(int virq) { MSIRouteEntry *entry, *next; QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) { if (entry->virq == virq) { trace_kvm_x86_remove_msi_route(virq); QLIST_REMOVE(entry, list); break; } } return 0; } int kvm_arch_msi_data_to_gsi(uint32_t data) { abort(); }
null
null
null
null
123,436
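As a standalone companion to the DR7 packing in kvm_arch_update_guest_debug above, the sketch below spells out the same bit layout; encode_dr7() is a hypothetical helper for one breakpoint slot, not a QEMU function, and it assumes the standard x86 debug-register format.

#include <stdint.h>

/* Mirrors the layout built above: 0x0600 sets the LE/GE bits, bit 2n+1 is
 * the global-enable bit for slot n, bits 16+4n carry the 2-bit access type
 * (0=exec, 1=write, 3=read/write) and bits 18+4n the 2-bit length code
 * (0=1 byte, 1=2, 3=4, 2=8). */
static uint32_t encode_dr7(int slot, uint32_t type_code, uint32_t len_code)
{
    uint32_t dr7 = 0x0600;
    dr7 |= 2u << (slot * 2);                    /* global enable for slot */
    dr7 |= (type_code & 3u) << (16 + slot * 4); /* access type field */
    dr7 |= (len_code & 3u) << (18 + slot * 4);  /* length code field */
    return dr7;
}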
14,520
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
14,520
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_HISTORY_CORE_BROWSER_DOWNLOAD_SLICE_INFO_H_ #define COMPONENTS_HISTORY_CORE_BROWSER_DOWNLOAD_SLICE_INFO_H_ #include <stdint.h> #include <string> #include "components/history/core/browser/download_types.h" namespace history { // Contains the information for each slice of data that is written to the // download target file. A download file can have multiple slices and each // slice will have a different offset. struct DownloadSliceInfo { DownloadSliceInfo(); DownloadSliceInfo(DownloadId download_id, int64_t offset, int64_t received_bytes, bool finished); DownloadSliceInfo(const DownloadSliceInfo& other); ~DownloadSliceInfo(); bool operator==(const DownloadSliceInfo&) const; // The id of the download in the database. DownloadId download_id; // Start position of the download request. int64_t offset; // The number of bytes received (so far). int64_t received_bytes; // If the download stream is successfully finished for this slice. bool finished; }; } // namespace history #endif // COMPONENTS_HISTORY_CORE_BROWSER_DOWNLOAD_SLICE_INFO_H_
null
null
null
null
11,383
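A plain-C restatement of the slice record above, plus the kind of aggregation helper a consumer might write; neither the struct nor total_received() is part of the history component, both are illustrative only.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative mirror of DownloadSliceInfo: each slice covers a region of
 * the target file starting at a distinct offset. */
struct download_slice {
    int64_t offset;
    int64_t received_bytes;
    bool finished;
};

/* Hypothetical helper: total bytes received across all slices so far. */
static int64_t total_received(const struct download_slice *slices, size_t n)
{
    int64_t total = 0;
    for (size_t i = 0; i < n; i++)
        total += slices[i].received_bytes;
    return total;
}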
38,191
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
38,191
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/* * Copyright (C) 2009 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "third_party/blink/public/platform/web_image.h" #include <algorithm> #include <memory> #include "base/memory/scoped_refptr.h" #include "third_party/blink/public/platform/web_data.h" #include "third_party/blink/public/platform/web_size.h" #include "third_party/blink/renderer/platform/drag_image.h" #include "third_party/blink/renderer/platform/graphics/bitmap_image.h" #include "third_party/blink/renderer/platform/graphics/image.h" #include "third_party/blink/renderer/platform/image-decoders/image_decoder.h" #include "third_party/blink/renderer/platform/shared_buffer.h" #include "third_party/blink/renderer/platform/wtf/vector.h" #include "third_party/skia/include/core/SkImage.h" namespace blink { WebImage WebImage::FromData(const WebData& data, const WebSize& desired_size) { std::unique_ptr<ImageDecoder> decoder(ImageDecoder::Create( data, true, ImageDecoder::kAlphaPremultiplied, ColorBehavior::Ignore())); if (!decoder || !decoder->IsSizeAvailable()) return WebImage(); // Frames are arranged by decreasing size, then decreasing bit depth. // Pick the frame closest to |desiredSize|'s area without being smaller, // which has the highest bit depth. const size_t frame_count = decoder->FrameCount(); size_t index = 0; // Default to first frame if none are large enough. int frame_area_at_index = 0; for (size_t i = 0; i < frame_count; ++i) { const IntSize frame_size = decoder->FrameSizeAtIndex(i); if (WebSize(frame_size) == desired_size) { index = i; break; // Perfect match. } const int frame_area = frame_size.Width() * frame_size.Height(); if (frame_area < (desired_size.width * desired_size.height)) break; // No more frames that are large enough. if (!i || (frame_area < frame_area_at_index)) { index = i; // Closer to desired area than previous best match. frame_area_at_index = frame_area; } } ImageFrame* frame = decoder->DecodeFrameBufferAtIndex(index); return (frame && !decoder->Failed()) ? 
WebImage(frame->Bitmap()) : WebImage(); } WebVector<WebImage> WebImage::FramesFromData(const WebData& data) { // This is to protect from malicious images. It should be big enough that it's // never hit in practice. const size_t kMaxFrameCount = 8; std::unique_ptr<ImageDecoder> decoder(ImageDecoder::Create( data, true, ImageDecoder::kAlphaPremultiplied, ColorBehavior::Ignore())); if (!decoder || !decoder->IsSizeAvailable()) return WebVector<WebImage>(); // Frames are arranged by decreasing size, then decreasing bit depth. // Keep the first frame at every size, has the highest bit depth. const size_t frame_count = decoder->FrameCount(); IntSize last_size; Vector<WebImage> frames; for (size_t i = 0; i < std::min(frame_count, kMaxFrameCount); ++i) { const IntSize frame_size = decoder->FrameSizeAtIndex(i); if (frame_size == last_size) continue; last_size = frame_size; ImageFrame* frame = decoder->DecodeFrameBufferAtIndex(i); if (!frame) continue; SkBitmap bitmap = frame->Bitmap(); if (!bitmap.isNull() && frame->GetStatus() == ImageFrame::kFrameComplete) frames.push_back(WebImage(bitmap)); } return frames; } WebVector<WebImage::AnimationFrame> WebImage::AnimationFromData( const WebData& data) { std::unique_ptr<ImageDecoder> decoder(ImageDecoder::Create( data, true, ImageDecoder::kAlphaPremultiplied, ColorBehavior::Ignore())); if (!decoder || !decoder->IsSizeAvailable() || decoder->FrameCount() == 0) return WebVector<WebImage::AnimationFrame>(); const size_t frame_count = decoder->FrameCount(); IntSize last_size = decoder->FrameSizeAtIndex(0); Vector<WebImage::AnimationFrame> frames; frames.ReserveCapacity(frame_count); for (size_t i = 0; i < frame_count; ++i) { // If frame size changes, this is most likely not an animation and is // instead an image with multiple versions at different resolutions. If // that's the case, return only the first frame (or no frames if we failed // decoding the first one). if (last_size != decoder->FrameSizeAtIndex(i)) { frames.resize(frames.IsEmpty() ? 0 : 1); return frames; } last_size = decoder->FrameSizeAtIndex(i); ImageFrame* frame = decoder->DecodeFrameBufferAtIndex(i); SkBitmap bitmap = frame->Bitmap(); if (bitmap.isNull() || frame->GetStatus() != ImageFrame::kFrameComplete) continue; // Make the bitmap a deep copy, otherwise the next loop iteration will // replace the contents of the previous frame. DecodeFrameBufferAtIndex // reuses the same underlying pixel buffer. bitmap.setImmutable(); AnimationFrame output; output.bitmap = bitmap; output.duration = frame->Duration(); frames.push_back(output); } return frames; } void WebImage::Reset() { bitmap_.reset(); } void WebImage::Assign(const WebImage& image) { bitmap_ = image.bitmap_; } bool WebImage::IsNull() const { return bitmap_.isNull(); } WebSize WebImage::Size() const { return WebSize(bitmap_.width(), bitmap_.height()); } WebImage::WebImage(scoped_refptr<Image> image, RespectImageOrientationEnum should_respect_image_orientation) { if (!image) return; PaintImage paint_image = image->PaintImageForCurrentFrame(); if (!paint_image) return; if (should_respect_image_orientation == kRespectImageOrientation && image->IsBitmapImage()) { ImageOrientation orientation = ToBitmapImage(image.get())->CurrentFrameOrientation(); paint_image = DragImage::ResizeAndOrientImage(paint_image, orientation); if (!paint_image) return; } if (sk_sp<SkImage> sk_image = paint_image.GetSkImage()) sk_image->asLegacyBitmap(&bitmap_); } } // namespace blink
null
null
null
null
35,054
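The frame-selection rule in WebImage::FromData above (frames sorted by decreasing size; pick the one whose area is closest to the request without going under) reduces to a short loop. This is a self-contained sketch with made-up types, not Blink code.

#include <stddef.h>

struct frame_size { int w, h; }; /* stand-in for decoder frame dimensions */

/* Assumes frames[] is sorted by decreasing size, as the comments above
 * state; returns the index of the best frame for the requested size. */
static size_t pick_frame(const struct frame_size *frames, size_t count,
                         int want_w, int want_h)
{
    size_t index = 0;   /* default to the first (largest) frame */
    int best_area = 0;
    for (size_t i = 0; i < count; i++) {
        if (frames[i].w == want_w && frames[i].h == want_h)
            return i;   /* perfect match */
        int area = frames[i].w * frames[i].h;
        if (area < want_w * want_h)
            break;      /* remaining frames are all too small */
        if (i == 0 || area < best_area) {
            index = i;  /* closest to the desired area so far */
            best_area = area;
        }
    }
    return index;
}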
28,736
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
193,731
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/of.h> #include <soc/tegra/common.h> static const struct of_device_id tegra_machine_match[] = { { .compatible = "nvidia,tegra20", }, { .compatible = "nvidia,tegra30", }, { .compatible = "nvidia,tegra114", }, { .compatible = "nvidia,tegra124", }, { .compatible = "nvidia,tegra132", }, { .compatible = "nvidia,tegra210", }, { } }; bool soc_is_tegra(void) { struct device_node *root; root = of_find_node_by_path("/"); if (!root) return false; return of_match_node(tegra_machine_match, root) != NULL; }
null
null
null
null
102,078
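Conceptually, the of_match_node() call above just walks the compatible table against the root node; a stripped-down illustration of that matching (not the kernel API) looks like this.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Illustration only: compare a node's compatible string against a
 * NULL-terminated table, the way the tegra table above is consulted. */
static bool compatible_matches(const char *const table[], const char *compat)
{
    for (size_t i = 0; table[i]; i++)
        if (strcmp(table[i], compat) == 0)
            return true;
    return false;
}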
41,681
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
206,676
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * This is used for host and peripheral modes of the driver for * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC. * * Board initialization should put one of these into dev->platform_data, * probably on some platform_device named "musb-hdrc". It encapsulates * key configuration differences between boards. */ #ifndef __LINUX_USB_MUSB_H #define __LINUX_USB_MUSB_H /* The USB role is defined by the connector used on the board, so long as * standards are being followed. (Developer boards sometimes won't.) */ enum musb_mode { MUSB_UNDEFINED = 0, MUSB_HOST, /* A or Mini-A connector */ MUSB_PERIPHERAL, /* B or Mini-B connector */ MUSB_OTG /* Mini-AB connector */ }; struct clk; enum musb_fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed)); enum musb_buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed)); struct musb_fifo_cfg { u8 hw_ep_num; enum musb_fifo_style style; enum musb_buf_mode mode; u16 maxpacket; }; #define MUSB_EP_FIFO(ep, st, m, pkt) \ { \ .hw_ep_num = ep, \ .style = st, \ .mode = m, \ .maxpacket = pkt, \ } #define MUSB_EP_FIFO_SINGLE(ep, st, pkt) \ MUSB_EP_FIFO(ep, st, BUF_SINGLE, pkt) #define MUSB_EP_FIFO_DOUBLE(ep, st, pkt) \ MUSB_EP_FIFO(ep, st, BUF_DOUBLE, pkt) struct musb_hdrc_eps_bits { const char name[16]; u8 bits; }; struct musb_hdrc_config { struct musb_fifo_cfg *fifo_cfg; /* board fifo configuration */ unsigned fifo_cfg_size; /* size of the fifo configuration */ /* MUSB configuration-specific details */ unsigned multipoint:1; /* multipoint device */ unsigned dyn_fifo:1 __deprecated; /* supports dynamic fifo sizing */ unsigned soft_con:1 __deprecated; /* soft connect required */ unsigned utm_16:1 __deprecated; /* utm data width is 16 bits */ unsigned big_endian:1; /* true if CPU uses big-endian */ unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */ unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */ unsigned high_iso_tx:1; /* Tx ep required for HB iso */ unsigned high_iso_rx:1; /* Rx ep required for HB iso */ unsigned dma:1 __deprecated; /* supports DMA */ unsigned vendor_req:1 __deprecated; /* vendor registers required */ /* need to explicitly de-assert the port reset after resume?
*/ unsigned host_port_deassert_reset_at_resume:1; u8 num_eps; /* number of endpoints _with_ ep0 */ u8 dma_channels __deprecated; /* number of dma channels */ u8 dyn_fifo_size; /* dynamic size in bytes */ u8 vendor_ctrl __deprecated; /* vendor control reg width */ u8 vendor_stat __deprecated; /* vendor status reg width */ u8 dma_req_chan __deprecated; /* bitmask for required dma channels */ u8 ram_bits; /* ram address size */ struct musb_hdrc_eps_bits *eps_bits __deprecated; #ifdef CONFIG_BLACKFIN /* A GPIO controlling VRSEL in Blackfin */ unsigned int gpio_vrsel; unsigned int gpio_vrsel_active; /* musb CLKIN in Blackfin in MHz */ unsigned char clkin; #endif u32 maximum_speed; }; struct musb_hdrc_platform_data { /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */ u8 mode; /* for clk_get() */ const char *clock; /* (HOST or OTG) switch VBUS on/off */ int (*set_vbus)(struct device *dev, int is_on); /* (HOST or OTG) mA/2 power supplied on (default = 8mA) */ u8 power; /* (PERIPHERAL) mA/2 max power consumed (default = 100mA) */ u8 min_power; /* (HOST or OTG) msec/2 after VBUS on till power good */ u8 potpgt; /* (HOST or OTG) program PHY for external Vbus */ unsigned extvbus:1; /* Power the device on or off */ int (*set_power)(int state); /* MUSB configuration-specific details */ const struct musb_hdrc_config *config; /* Architecture specific board data */ void *board_data; /* Platform specific struct musb_ops pointer */ const void *platform_ops; }; enum musb_vbus_id_status { MUSB_UNKNOWN = 0, MUSB_ID_GROUND, MUSB_ID_FLOAT, MUSB_VBUS_VALID, MUSB_VBUS_OFF, }; #if IS_ENABLED(CONFIG_USB_MUSB_HDRC) int musb_mailbox(enum musb_vbus_id_status status); #else static inline int musb_mailbox(enum musb_vbus_id_status status) { return 0; } #endif /* TUSB 6010 support */ #define TUSB6010_OSCCLK_60 16667 /* psec/clk @ 60.0 MHz */ #define TUSB6010_REFCLK_24 41667 /* psec/clk @ 24.0 MHz XI */ #define TUSB6010_REFCLK_19 52083 /* psec/clk @ 19.2 MHz CLKIN */ #ifdef CONFIG_ARCH_OMAP2 extern int __init tusb6010_setup_interface( struct musb_hdrc_platform_data *data, unsigned ps_refclk, unsigned waitpin, unsigned async_cs, unsigned sync_cs, unsigned irq, unsigned dmachan); extern int tusb6010_platform_retime(unsigned is_refclk); #endif /* OMAP2 */ #endif /* __LINUX_USB_MUSB_H */
null
null
null
null
115,023
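To show how the MUSB_EP_FIFO_* macros above are meant to be consumed, here is an illustrative board table of the kind passed through musb_hdrc_config.fifo_cfg; the endpoint numbers and packet sizes are made up, and the sketch assumes the header above is included.

/* Hypothetical board FIFO layout built from the helper macros above. */
static struct musb_fifo_cfg example_fifo_cfg[] = {
	MUSB_EP_FIFO_SINGLE(1, FIFO_TX,   512),
	MUSB_EP_FIFO_SINGLE(1, FIFO_RX,   512),
	MUSB_EP_FIFO_DOUBLE(2, FIFO_TX,   512),
	MUSB_EP_FIFO_DOUBLE(2, FIFO_RX,   512),
	MUSB_EP_FIFO_SINGLE(3, FIFO_RXTX, 256),
};
/* A board would then set .fifo_cfg = example_fifo_cfg and
 * .fifo_cfg_size = ARRAY_SIZE(example_fifo_cfg) in its config. */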
962
13,16,17
train_val
54d83fc74aa9ec72794373cb47432c5f7fb1a309
165,957
linux
1
https://github.com/torvalds/linux
2016-03-28 17:59:15+02:00
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e, const char *hookname, const char **chainname, const char **comment, unsigned int *rulenum) { const struct xt_standard_target *t = (void *)ip6t_get_target_c(s); if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) { /* Head of user chain: ERROR target with chainname */ *chainname = t->target.data; (*rulenum) = 0; } else if (s == e) { (*rulenum)++; if (s->target_offset == sizeof(struct ip6t_entry) && strcmp(t->target.u.kernel.target->name, XT_STANDARD_TARGET) == 0 && t->verdict < 0 && unconditional(&s->ipv6)) { /* Tail of chains: STANDARD target (return/policy) */ *comment = *chainname == hookname ? comments[NF_IP6_TRACE_COMMENT_POLICY] : comments[NF_IP6_TRACE_COMMENT_RETURN]; } return 1; } else (*rulenum)++; return 0; }
CVE-2016-3134
CWE-119
https://github.com/torvalds/linux/commit/54d83fc74aa9ec72794373cb47432c5f7fb1a309
Low
3,851
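For contrast with the vulnerable sample above (CVE-2016-3134, record lines 13/16/17), the linked fix commit reworks the unconditional() helper to take the whole entry, so the target_offset check cannot be bypassed by a caller. This is a sketch from memory of the ip6_tables variant, written in kernel context (linux netfilter headers assumed); verify against the commit before relying on it.

/* Post-fix shape of the helper (sketched, not copied verbatim): a rule is
 * unconditional only when its target immediately follows the entry header
 * and its IPv6 match section is entirely zero. */
static bool unconditional(const struct ip6t_entry *e)
{
	static const struct ip6t_ip6 uncond;

	return e->target_offset == sizeof(struct ip6t_entry) &&
	       memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
}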
23,230
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
23,230
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "content/browser/background_fetch/storage/delete_registration_task.h" #include <utility> #include "content/browser/background_fetch/background_fetch.pb.h" #include "content/browser/background_fetch/storage/database_helpers.h" #include "content/browser/service_worker/service_worker_context_wrapper.h" namespace content { namespace background_fetch { namespace { #if DCHECK_IS_ON() // Checks that the |ActiveRegistrationUniqueIdKey| either does not exist, or is // associated with a different |unique_id| than the given one which should have // been already marked for deletion. void DCheckRegistrationNotActive(const std::string& unique_id, const std::vector<std::string>& data, ServiceWorkerStatusCode status) { switch (ToDatabaseStatus(status)) { case DatabaseStatus::kOk: DCHECK_EQ(1u, data.size()); DCHECK_NE(unique_id, data[0]) << "Must call MarkRegistrationForDeletion before DeleteRegistration"; return; case DatabaseStatus::kFailed: return; // TODO(crbug.com/780025): Consider logging failure to UMA. case DatabaseStatus::kNotFound: return; } } #endif // DCHECK_IS_ON() } // namespace DeleteRegistrationTask::DeleteRegistrationTask( BackgroundFetchDataManager* data_manager, int64_t service_worker_registration_id, const std::string& unique_id, HandleBackgroundFetchErrorCallback callback) : DatabaseTask(data_manager), service_worker_registration_id_(service_worker_registration_id), unique_id_(unique_id), callback_(std::move(callback)), weak_factory_(this) {} DeleteRegistrationTask::~DeleteRegistrationTask() = default; void DeleteRegistrationTask::Start() { #if DCHECK_IS_ON() // Get the registration |developer_id| to check it was deactivated. service_worker_context()->GetRegistrationUserData( service_worker_registration_id_, {RegistrationKey(unique_id_)}, base::BindOnce(&DeleteRegistrationTask::DidGetRegistration, weak_factory_.GetWeakPtr())); #else DidGetRegistration({}, SERVICE_WORKER_OK); #endif // DCHECK_IS_ON() } void DeleteRegistrationTask::DidGetRegistration( const std::vector<std::string>& data, ServiceWorkerStatusCode status) { #if DCHECK_IS_ON() if (ToDatabaseStatus(status) == DatabaseStatus::kOk) { DCHECK_EQ(1u, data.size()); proto::BackgroundFetchMetadata metadata_proto; if (metadata_proto.ParseFromString(data[0]) && metadata_proto.registration().has_developer_id()) { service_worker_context()->GetRegistrationUserData( service_worker_registration_id_, {ActiveRegistrationUniqueIdKey( metadata_proto.registration().developer_id())}, base::BindOnce(&DCheckRegistrationNotActive, unique_id_)); } else { NOTREACHED() << "Database is corrupt"; // TODO(crbug.com/780027): Nuke it. } } else { // TODO(crbug.com/780025): Log failure to UMA. } #endif // DCHECK_IS_ON() service_worker_context()->ClearRegistrationUserDataByKeyPrefixes( service_worker_registration_id_, {RegistrationKey(unique_id_), RequestKeyPrefix(unique_id_)}, base::BindOnce(&DeleteRegistrationTask::DidDeleteRegistration, weak_factory_.GetWeakPtr())); } void DeleteRegistrationTask::DidDeleteRegistration( ServiceWorkerStatusCode status) { switch (ToDatabaseStatus(status)) { case DatabaseStatus::kOk: case DatabaseStatus::kNotFound: std::move(callback_).Run(blink::mojom::BackgroundFetchError::NONE); Finished(); // Destroys |this|. 
return; case DatabaseStatus::kFailed: std::move(callback_).Run( blink::mojom::BackgroundFetchError::STORAGE_ERROR); Finished(); // Destroys |this|. return; } } } // namespace background_fetch } // namespace content
null
null
null
null
20,093
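The ToDatabaseStatus() helper the task above relies on is not shown in this record; as a rough illustration of the pattern, a narrowing mapper might look like the sketch below. The numeric values are invented, not the real ServiceWorkerStatusCode constants.

enum db_status { DB_OK, DB_NOT_FOUND, DB_FAILED };

/* Collapse a wide status space into the three cases the task handles. */
static enum db_status to_db_status(int service_worker_status)
{
    switch (service_worker_status) {
    case 0:  return DB_OK;        /* assumed: success code */
    case 1:  return DB_NOT_FOUND; /* assumed: not-found code */
    default: return DB_FAILED;    /* everything else is a failure */
    }
}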
44,682
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
44,682
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef PPAPI_PROXY_LOCKING_RESOURCE_RELEASER_H_ #define PPAPI_PROXY_LOCKING_RESOURCE_RELEASER_H_ #include "base/macros.h" #include "ppapi/shared_impl/ppapi_globals.h" #include "ppapi/shared_impl/proxy_lock.h" #include "ppapi/shared_impl/resource_tracker.h" namespace ppapi { namespace proxy { // LockingResourceReleaser is a simple RAII class for releasing a resource at // the end of scope. This acquires the ProxyLock before releasing the resource. // It is for use in unit tests. Most proxy or implementation code should use // ScopedPPResource instead. Unit tests sometimes can't use ScopedPPResource // because it asserts that the ProxyLock is already held. class LockingResourceReleaser { public: explicit LockingResourceReleaser(PP_Resource resource) : resource_(resource) { } ~LockingResourceReleaser() { ProxyAutoLock lock; PpapiGlobals::Get()->GetResourceTracker()->ReleaseResource(resource_); } PP_Resource get() { return resource_; } private: PP_Resource resource_; DISALLOW_COPY_AND_ASSIGN(LockingResourceReleaser); }; } // namespace proxy } // namespace ppapi #endif // PPAPI_PROXY_LOCKING_RESOURCE_RELEASER_H_
null
null
null
null
41,545
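LockingResourceReleaser above relies on C++ destructors for release-at-scope-exit; the nearest C analogue, shown purely for comparison, is the GCC/Clang cleanup attribute. release_resource() is a stand-in, not a PPAPI call.

typedef int pp_resource; /* stand-in for PP_Resource */

/* Runs automatically when the annotated variable leaves scope. */
static void release_resource(pp_resource *res)
{
    /* acquire lock, release *res, drop lock -- elided in this sketch */
    (void)res;
}

static void example_scope(pp_resource acquired)
{
    __attribute__((cleanup(release_resource))) pp_resource res = acquired;
    /* use res; release_resource(&res) fires on every exit path */
}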
17,313
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
182,308
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #ifndef _ASM_PERF_EVENT_H #define _ASM_PERF_EVENT_H #endif /* _ASM_PERF_EVENT_H */
null
null
null
null
90,655
54,404
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
54,404
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "base/command_line.h" #include "base/test/launcher/test_launcher.h" #include "chrome/test/base/chrome_test_launcher.h" #include "chrome/test/base/chrome_test_suite.h" int main(int argc, char** argv) { base::CommandLine::Init(argc, argv); size_t parallel_jobs = base::NumParallelJobs(); if (parallel_jobs == 0U) { return 1; } else if (parallel_jobs > 1U) { parallel_jobs /= 2U; } ChromeTestSuiteRunner runner; ChromeTestLauncherDelegate delegate(&runner); return LaunchChromeTests(parallel_jobs, &delegate, argc, argv); }
null
null
null
null
51,267
51,516
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
51,516
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ui/keyboard/keyboard_util.h" #include "base/macros.h" #include "base/memory/ptr_util.h" #include "testing/gtest/include/gtest/gtest.h" #include "ui/keyboard/keyboard_controller.h" #include "ui/keyboard/keyboard_test_util.h" #include "ui/keyboard/keyboard_ui.h" namespace keyboard { namespace { class KeyboardUtilTest : public testing::Test { public: KeyboardUtilTest() {} ~KeyboardUtilTest() override {} // Sets all flags controlling whether the keyboard should be shown to // their disabled state. void DisableAllFlags() { keyboard::SetAccessibilityKeyboardEnabled(false); keyboard::SetTouchKeyboardEnabled(false); keyboard::SetKeyboardShowOverride( keyboard::KEYBOARD_SHOW_OVERRIDE_DISABLED); keyboard::SetRequestedKeyboardState(keyboard::KEYBOARD_STATE_DISABLED); } // Sets all flags controlling whether the keyboard should be shown to // their enabled state. void EnableAllFlags() { keyboard::SetAccessibilityKeyboardEnabled(true); keyboard::SetTouchKeyboardEnabled(true); keyboard::SetKeyboardShowOverride(keyboard::KEYBOARD_SHOW_OVERRIDE_ENABLED); keyboard::SetRequestedKeyboardState(keyboard::KEYBOARD_STATE_ENABLED); } // Sets all flags controlling whether the keyboard should be shown to // their neutral state. void ResetAllFlags() { keyboard::SetAccessibilityKeyboardEnabled(false); keyboard::SetTouchKeyboardEnabled(false); keyboard::SetKeyboardShowOverride(keyboard::KEYBOARD_SHOW_OVERRIDE_NONE); keyboard::SetRequestedKeyboardState(keyboard::KEYBOARD_STATE_AUTO); } void SetUp() override { ResetAllFlags(); } private: DISALLOW_COPY_AND_ASSIGN(KeyboardUtilTest); }; } // namespace // Tests that we respect the accessibility setting. TEST_F(KeyboardUtilTest, AlwaysShowIfA11yEnabled) { // Disabled by default. EXPECT_FALSE(keyboard::IsKeyboardEnabled()); // If enabled by accessibility, should ignore other flag values. DisableAllFlags(); keyboard::SetAccessibilityKeyboardEnabled(true); EXPECT_TRUE(keyboard::IsKeyboardEnabled()); } // Tests that we respect the policy setting. TEST_F(KeyboardUtilTest, AlwaysShowIfPolicyEnabled) { EXPECT_FALSE(keyboard::IsKeyboardEnabled()); // If policy is enabled, should ignore other flag values. DisableAllFlags(); keyboard::SetKeyboardShowOverride(keyboard::KEYBOARD_SHOW_OVERRIDE_ENABLED); EXPECT_TRUE(keyboard::IsKeyboardEnabled()); } // Tests that we respect the policy setting. TEST_F(KeyboardUtilTest, HidesIfPolicyDisabled) { EXPECT_FALSE(keyboard::IsKeyboardEnabled()); EnableAllFlags(); // Set accessibility to neutral since accessibility has higher precedence. keyboard::SetAccessibilityKeyboardEnabled(false); EXPECT_TRUE(keyboard::IsKeyboardEnabled()); // Disable policy. Keyboard should be disabled. keyboard::SetKeyboardShowOverride(keyboard::KEYBOARD_SHOW_OVERRIDE_DISABLED); EXPECT_FALSE(keyboard::IsKeyboardEnabled()); } // Tests that the keyboard shows when requested state provided higher priority // flags have not been set. TEST_F(KeyboardUtilTest, ShowKeyboardWhenRequested) { DisableAllFlags(); // Remove device policy, which has higher precedence than us. keyboard::SetKeyboardShowOverride(keyboard::KEYBOARD_SHOW_OVERRIDE_NONE); EXPECT_FALSE(keyboard::IsKeyboardEnabled()); // Requested should have higher precedence than all the remaining flags. 
keyboard::SetRequestedKeyboardState(keyboard::KEYBOARD_STATE_ENABLED); EXPECT_TRUE(keyboard::IsKeyboardEnabled()); } // Tests that the touch keyboard is hidden when requested state is disabled and // higher priority flags have not been set. TEST_F(KeyboardUtilTest, HideKeyboardWhenRequested) { EnableAllFlags(); // Remove higher precedence flags. keyboard::SetKeyboardShowOverride(keyboard::KEYBOARD_SHOW_OVERRIDE_NONE); keyboard::SetAccessibilityKeyboardEnabled(false); EXPECT_TRUE(keyboard::IsKeyboardEnabled()); // Set requested state to disable. Keyboard should disable. keyboard::SetRequestedKeyboardState(keyboard::KEYBOARD_STATE_DISABLED); EXPECT_FALSE(keyboard::IsKeyboardEnabled()); } // SetTouchKeyboardEnabled has the lowest priority, but should still work when // none of the other flags are enabled. TEST_F(KeyboardUtilTest, HideKeyboardWhenTouchEnabled) { ResetAllFlags(); EXPECT_FALSE(keyboard::IsKeyboardEnabled()); keyboard::SetTouchKeyboardEnabled(true); EXPECT_TRUE(keyboard::IsKeyboardEnabled()); } TEST_F(KeyboardUtilTest, UpdateKeyboardConfig) { ResetAllFlags(); keyboard::KeyboardConfig config = keyboard::GetKeyboardConfig(); EXPECT_TRUE(config.spell_check); EXPECT_FALSE(keyboard::UpdateKeyboardConfig(config)); config.spell_check = false; EXPECT_TRUE(keyboard::UpdateKeyboardConfig(config)); EXPECT_FALSE(keyboard::GetKeyboardConfig().spell_check); EXPECT_FALSE(keyboard::UpdateKeyboardConfig(config)); } TEST_F(KeyboardUtilTest, IsOverscrollEnabled) { ResetAllFlags(); // Return false when keyboard is disabled. EXPECT_FALSE(keyboard::IsKeyboardOverscrollEnabled()); // Enable the virtual keyboard. keyboard::SetTouchKeyboardEnabled(true); EXPECT_TRUE(keyboard::IsKeyboardOverscrollEnabled()); // Override overscroll enabled state. keyboard::SetKeyboardOverscrollOverride( KEYBOARD_OVERSCROLL_OVERRIDE_DISABLED); EXPECT_FALSE(keyboard::IsKeyboardOverscrollEnabled()); keyboard::SetKeyboardOverscrollOverride(KEYBOARD_OVERSCROLL_OVERRIDE_NONE); EXPECT_TRUE(keyboard::IsKeyboardOverscrollEnabled()); // Set keyboard_locked() to true. ui::DummyInputMethod input_method; KeyboardController::ResetInstance(new KeyboardController( std::make_unique<TestKeyboardUI>(&input_method), nullptr)); KeyboardController* controller = keyboard::KeyboardController::GetInstance(); controller->set_keyboard_locked(true); EXPECT_TRUE(controller->keyboard_locked()); EXPECT_FALSE(keyboard::IsKeyboardOverscrollEnabled()); KeyboardController::ResetInstance(nullptr); } } // namespace keyboard
null
null
null
null
48,379
1,088
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
154,145
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/* * DivX (XSUB) subtitle encoder * Copyright (c) 2005 DivX, Inc. * Copyright (c) 2009 Bjorn Axelsson * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avcodec.h" #include "bytestream.h" #include "put_bits.h" /** * Number of pixels to pad left and right. * * The official encoder pads the subtitles with two pixels on either side, * but until we find out why, we won't do it (we will pad to have width * divisible by 2 though). */ #define PADDING 0 #define PADDING_COLOR 0 /** * Encode a single color run. At most 16 bits will be used. * @param len length of the run, values > 255 mean "until end of line", may not be < 0. * @param color color to encode, only the lowest two bits are used and all others must be 0. */ static void put_xsub_rle(PutBitContext *pb, int len, int color) { if (len <= 255) put_bits(pb, 2 + ((ff_log2_tab[len] >> 1) << 2), len); else put_bits(pb, 14, 0); put_bits(pb, 2, color); } /** * Encode a 4-color bitmap with XSUB rle. * * The encoded bitmap may be wider than the source bitmap due to padding. */ static int xsub_encode_rle(PutBitContext *pb, const uint8_t *bitmap, int linesize, int w, int h) { int x0, x1, y, len, color = PADDING_COLOR; for (y = 0; y < h; y++) { x0 = 0; while (x0 < w) { // Make sure we have enough room for at least one run and padding if (pb->size_in_bits - put_bits_count(pb) < 7*8) return -1; x1 = x0; color = bitmap[x1++] & 3; while (x1 < w && (bitmap[x1] & 3) == color) x1++; len = x1 - x0; if (PADDING && x0 == 0) { if (color == PADDING_COLOR) { len += PADDING; x0 -= PADDING; } else put_xsub_rle(pb, PADDING, PADDING_COLOR); } // Run can't be longer than 255, unless it is the rest of a row if (x1 == w && color == PADDING_COLOR) { len += PADDING + (w&1); } else len = FFMIN(len, 255); put_xsub_rle(pb, len, color); x0 += len; } if (color != PADDING_COLOR && (PADDING + (w&1))) put_xsub_rle(pb, PADDING + (w&1), PADDING_COLOR); avpriv_align_put_bits(pb); bitmap += linesize; } return 0; } static int make_tc(uint64_t ms, int *tc) { static const int tc_divs[3] = { 1000, 60, 60 }; int i; for (i=0; i<3; i++) { tc[i] = ms % tc_divs[i]; ms /= tc_divs[i]; } tc[3] = ms; return ms > 99; } static int xsub_encode(AVCodecContext *avctx, unsigned char *buf, int bufsize, const AVSubtitle *h) { uint64_t startTime = h->pts / 1000; // FIXME: need better solution... 
uint64_t endTime = startTime + h->end_display_time - h->start_display_time; int start_tc[4], end_tc[4]; uint8_t *hdr = buf + 27; // Point behind the timestamp uint8_t *rlelenptr; uint16_t width, height; int i; PutBitContext pb; if (bufsize < 27 + 7*2 + 4*3) { av_log(avctx, AV_LOG_ERROR, "Buffer too small for XSUB header.\n"); return -1; } // TODO: support multiple rects if (h->num_rects != 1) av_log(avctx, AV_LOG_WARNING, "Only single rects supported (%d in subtitle.)\n", h->num_rects); #if FF_API_AVPICTURE FF_DISABLE_DEPRECATION_WARNINGS if (!h->rects[0]->data[0]) { AVSubtitleRect *rect = h->rects[0]; int j; for (j = 0; j < 4; j++) { rect->data[j] = rect->pict.data[j]; rect->linesize[j] = rect->pict.linesize[j]; } } FF_ENABLE_DEPRECATION_WARNINGS #endif // TODO: render text-based subtitles into bitmaps if (!h->rects[0]->data[0] || !h->rects[0]->data[1]) { av_log(avctx, AV_LOG_WARNING, "No subtitle bitmap available.\n"); return -1; } // TODO: color reduction, similar to dvdsub encoder if (h->rects[0]->nb_colors > 4) av_log(avctx, AV_LOG_WARNING, "No more than 4 subtitle colors supported (%d found.)\n", h->rects[0]->nb_colors); // TODO: Palette swapping if color zero is not transparent if (((uint32_t *)h->rects[0]->data[1])[0] & 0xff000000) av_log(avctx, AV_LOG_WARNING, "Color index 0 is not transparent. Transparency will be messed up.\n"); if (make_tc(startTime, start_tc) || make_tc(endTime, end_tc)) { av_log(avctx, AV_LOG_WARNING, "Time code >= 100 hours.\n"); return -1; } snprintf(buf, 28, "[%02d:%02d:%02d.%03d-%02d:%02d:%02d.%03d]", start_tc[3], start_tc[2], start_tc[1], start_tc[0], end_tc[3], end_tc[2], end_tc[1], end_tc[0]); // Width and height must probably be multiples of 2. // 2 pixels required on either side of subtitle. // Possibly due to limitations of hardware renderers. // TODO: check if the bitmap is already padded width = FFALIGN(h->rects[0]->w, 2) + PADDING * 2; height = FFALIGN(h->rects[0]->h, 2); bytestream_put_le16(&hdr, width); bytestream_put_le16(&hdr, height); bytestream_put_le16(&hdr, h->rects[0]->x); bytestream_put_le16(&hdr, h->rects[0]->y); bytestream_put_le16(&hdr, h->rects[0]->x + width -1); bytestream_put_le16(&hdr, h->rects[0]->y + height -1); rlelenptr = hdr; // Will store length of first field here later. hdr+=2; // Palette for (i=0; i<4; i++) bytestream_put_be24(&hdr, ((uint32_t *)h->rects[0]->data[1])[i]); // Bitmap // RLE buffer. Reserve 2 bytes for possible padding after the last row. init_put_bits(&pb, hdr, bufsize - (hdr - buf) - 2); if (xsub_encode_rle(&pb, h->rects[0]->data[0], h->rects[0]->linesize[0] * 2, h->rects[0]->w, (h->rects[0]->h + 1) >> 1)) return -1; bytestream_put_le16(&rlelenptr, put_bits_count(&pb) >> 3); // Length of first field if (xsub_encode_rle(&pb, h->rects[0]->data[0] + h->rects[0]->linesize[0], h->rects[0]->linesize[0] * 2, h->rects[0]->w, h->rects[0]->h >> 1)) return -1; // Enforce total height to be a multiple of 2 if (h->rects[0]->h & 1) { put_xsub_rle(&pb, h->rects[0]->w, PADDING_COLOR); avpriv_align_put_bits(&pb); } flush_put_bits(&pb); return hdr - buf + put_bits_count(&pb)/8; } static av_cold int xsub_encoder_init(AVCodecContext *avctx) { if (!avctx->codec_tag) avctx->codec_tag = MKTAG('D','X','S','B'); avctx->bits_per_coded_sample = 4; return 0; } AVCodec ff_xsub_encoder = { .name = "xsub", .long_name = NULL_IF_CONFIG_SMALL("DivX subtitles (XSUB)"), .type = AVMEDIA_TYPE_SUBTITLE, .id = AV_CODEC_ID_XSUB, .init = xsub_encoder_init, .encode_sub = xsub_encode, };
null
null
null
null
70,200
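A worked companion to put_xsub_rle() above: the run-length field widens in 4-bit steps, so runs of 1-3 use 2 bits, 4-15 use 6, 16-63 use 10 and 64-255 use 14 (longer runs are written as 14 zero bits, meaning "rest of row"). The helper below recomputes the width with an explicit floor(log2) instead of ff_log2_tab, for illustration only.

#include <stdio.h>

/* Same width formula as the encoder above: 2 + (floor(log2(len))/2)*4. */
static int xsub_len_bits(int len)
{
    int log2 = 0;
    while ((1 << (log2 + 1)) <= len)
        log2++;                      /* floor(log2(len)), len >= 1 */
    return 2 + ((log2 >> 1) << 2);
}

int main(void)
{
    for (int len = 1; len <= 255; len <<= 2)   /* 1, 4, 16, 64 */
        printf("len %3d -> %2d bits\n", len, xsub_len_bits(len));
    return 0;
}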
49,964
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
49,964
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ui/base/platform_window_defaults.h" namespace ui { namespace { bool g_use_test_config = false; } // namespace bool UseTestConfigForPlatformWindows() { return g_use_test_config; } namespace test { void EnableTestConfigForPlatformWindows() { g_use_test_config = true; } } // namespace test } // namespace ui
null
null
null
null
46,827
58,296
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
58,296
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "base/strings/nullable_string16.h" #include "base/strings/string16.h" #include "base/strings/string_util.h" #include "base/strings/utf_string_conversions.h" #include "chrome/browser/webshare/share_target_pref_helper.h" #include "chrome/common/pref_names.h" #include "chrome/test/base/testing_profile.h" #include "components/prefs/pref_registry_simple.h" #include "components/prefs/testing_pref_service.h" #include "testing/gtest/include/gtest/gtest.h" class PrefRegistrySimple; namespace { class ShareTargetPrefHelperUnittest : public testing::Test { protected: ShareTargetPrefHelperUnittest() {} ~ShareTargetPrefHelperUnittest() override {} void SetUp() override { pref_service_.reset(new TestingPrefServiceSimple()); pref_service_->registry()->RegisterDictionaryPref( prefs::kWebShareVisitedTargets); } PrefService* pref_service() { return pref_service_.get(); } private: std::unique_ptr<TestingPrefServiceSimple> pref_service_; }; constexpr char kNameKey[] = "name"; constexpr char kUrlTemplateKey[] = "url_template"; TEST_F(ShareTargetPrefHelperUnittest, AddMultipleShareTargets) { // Add a share target to prefs that wasn't previously stored. GURL manifest_url("https://www.sharetarget.com/manifest.json"); content::Manifest::ShareTarget share_target; std::string url_template = "https://www.sharetarget.com/share?title={title}"; share_target.url_template = GURL(url_template); content::Manifest manifest; manifest.share_target = base::Optional<content::Manifest::ShareTarget>(share_target); UpdateShareTargetInPrefs(manifest_url, manifest, pref_service()); const base::DictionaryValue* share_target_dict = pref_service()->GetDictionary(prefs::kWebShareVisitedTargets); EXPECT_EQ(1UL, share_target_dict->size()); const base::DictionaryValue* share_target_info_dict = nullptr; ASSERT_TRUE(share_target_dict->GetDictionaryWithoutPathExpansion( manifest_url.spec(), &share_target_info_dict)); EXPECT_EQ(1UL, share_target_info_dict->size()); std::string url_template_in_dict; EXPECT_TRUE(share_target_info_dict->GetString(kUrlTemplateKey, &url_template_in_dict)); EXPECT_EQ(url_template, url_template_in_dict); // Add second share target to prefs that wasn't previously stored. manifest_url = GURL("https://www.sharetarget2.com/manifest.json"); std::string name = "Share Target Name"; manifest.name = base::NullableString16(base::ASCIIToUTF16(name), false); UpdateShareTargetInPrefs(manifest_url, manifest, pref_service()); share_target_dict = pref_service()->GetDictionary(prefs::kWebShareVisitedTargets); EXPECT_EQ(2UL, share_target_dict->size()); ASSERT_TRUE(share_target_dict->GetDictionaryWithoutPathExpansion( manifest_url.spec(), &share_target_info_dict)); EXPECT_EQ(2UL, share_target_info_dict->size()); EXPECT_TRUE(share_target_info_dict->GetString(kUrlTemplateKey, &url_template_in_dict)); EXPECT_EQ(url_template, url_template_in_dict); std::string name_in_dict; EXPECT_TRUE(share_target_info_dict->GetString(kNameKey, &name_in_dict)); EXPECT_EQ(name, name_in_dict); } TEST_F(ShareTargetPrefHelperUnittest, AddShareTargetTwice) { const char kManifestUrl[] = "https://www.sharetarget.com/manifest.json"; const char kUrlTemplate[] = "https://www.sharetarget.com/share/?title={title}"; // Add a share target to prefs that wasn't previously stored. 
GURL manifest_url(kManifestUrl); content::Manifest::ShareTarget share_target; share_target.url_template = GURL(kUrlTemplate); content::Manifest manifest; manifest.share_target = base::Optional<content::Manifest::ShareTarget>(share_target); UpdateShareTargetInPrefs(manifest_url, manifest, pref_service()); const base::DictionaryValue* share_target_dict = pref_service()->GetDictionary(prefs::kWebShareVisitedTargets); EXPECT_EQ(1UL, share_target_dict->size()); const base::DictionaryValue* share_target_info_dict = nullptr; ASSERT_TRUE(share_target_dict->GetDictionaryWithoutPathExpansion( kManifestUrl, &share_target_info_dict)); EXPECT_EQ(1UL, share_target_info_dict->size()); std::string url_template_in_dict; EXPECT_TRUE(share_target_info_dict->GetString(kUrlTemplateKey, &url_template_in_dict)); EXPECT_EQ(kUrlTemplate, url_template_in_dict); // Add same share target to prefs that was previously stored; shouldn't // duplicate it. UpdateShareTargetInPrefs(manifest_url, manifest, pref_service()); share_target_dict = pref_service()->GetDictionary(prefs::kWebShareVisitedTargets); EXPECT_EQ(1UL, share_target_dict->size()); ASSERT_TRUE(share_target_dict->GetDictionaryWithoutPathExpansion( kManifestUrl, &share_target_info_dict)); EXPECT_EQ(1UL, share_target_info_dict->size()); EXPECT_TRUE(share_target_info_dict->GetString(kUrlTemplateKey, &url_template_in_dict)); EXPECT_EQ(kUrlTemplate, url_template_in_dict); } TEST_F(ShareTargetPrefHelperUnittest, UpdateShareTarget) { // Add a share target to prefs that wasn't previously stored. GURL manifest_url("https://www.sharetarget.com/manifest.json"); content::Manifest::ShareTarget share_target; std::string url_template = "https://www.sharetarget.com/share/?title={title}"; share_target.url_template = GURL(url_template); content::Manifest manifest; manifest.share_target = base::Optional<content::Manifest::ShareTarget>(share_target); UpdateShareTargetInPrefs(manifest_url, manifest, pref_service()); const base::DictionaryValue* share_target_dict = pref_service()->GetDictionary(prefs::kWebShareVisitedTargets); EXPECT_EQ(1UL, share_target_dict->size()); const base::DictionaryValue* share_target_info_dict = nullptr; ASSERT_TRUE(share_target_dict->GetDictionaryWithoutPathExpansion( manifest_url.spec(), &share_target_info_dict)); EXPECT_EQ(1UL, share_target_info_dict->size()); std::string url_template_in_dict; EXPECT_TRUE(share_target_info_dict->GetString(kUrlTemplateKey, &url_template_in_dict)); EXPECT_EQ(url_template, url_template_in_dict); // Add same share target to prefs that was previously stored, with new // url_template_in_dict; should update the value. url_template = "https://www.sharetarget.com/share/?title={title}&text={text}"; manifest.share_target.value().url_template = GURL(url_template); UpdateShareTargetInPrefs(manifest_url, manifest, pref_service()); share_target_dict = pref_service()->GetDictionary(prefs::kWebShareVisitedTargets); EXPECT_EQ(1UL, share_target_dict->size()); ASSERT_TRUE(share_target_dict->GetDictionaryWithoutPathExpansion( manifest_url.spec(), &share_target_info_dict)); EXPECT_EQ(1UL, share_target_info_dict->size()); EXPECT_TRUE(share_target_info_dict->GetString(kUrlTemplateKey, &url_template_in_dict)); EXPECT_EQ(url_template, url_template_in_dict); } TEST_F(ShareTargetPrefHelperUnittest, DontAddNonShareTarget) { const char kManifestUrl[] = "https://www.dudsharetarget.com/manifest.json"; const base::Optional<std::string> kUrlTemplate; // Don't add a site that has a null template. 
UpdateShareTargetInPrefs(GURL(kManifestUrl), content::Manifest(), pref_service()); const base::DictionaryValue* share_target_dict = pref_service()->GetDictionary(prefs::kWebShareVisitedTargets); EXPECT_EQ(0UL, share_target_dict->size()); const base::DictionaryValue* share_target_info_dict = nullptr; ASSERT_FALSE(share_target_dict->GetDictionaryWithoutPathExpansion( kManifestUrl, &share_target_info_dict)); } TEST_F(ShareTargetPrefHelperUnittest, RemoveShareTarget) { // Add a share target to prefs that wasn't previously stored. GURL manifest_url("https://www.sharetarget.com/manifest.json"); content::Manifest::ShareTarget share_target; std::string url_template = "https://www.sharetarget.com/share/?title={title}"; share_target.url_template = GURL(url_template); content::Manifest manifest; manifest.share_target = base::Optional<content::Manifest::ShareTarget>(share_target); UpdateShareTargetInPrefs(manifest_url, manifest, pref_service()); const base::DictionaryValue* share_target_dict = pref_service()->GetDictionary(prefs::kWebShareVisitedTargets); EXPECT_EQ(1UL, share_target_dict->size()); const base::DictionaryValue* share_target_info_dict = nullptr; ASSERT_TRUE(share_target_dict->GetDictionaryWithoutPathExpansion( manifest_url.spec(), &share_target_info_dict)); EXPECT_EQ(1UL, share_target_info_dict->size()); std::string url_template_in_dict; EXPECT_TRUE(share_target_info_dict->GetString(kUrlTemplateKey, &url_template_in_dict)); EXPECT_EQ(url_template, url_template_in_dict); // Share target already added now has null template. Remove from prefs. manifest_url = GURL("https://www.sharetarget.com/manifest.json"); UpdateShareTargetInPrefs(manifest_url, content::Manifest(), pref_service()); share_target_dict = pref_service()->GetDictionary(prefs::kWebShareVisitedTargets); EXPECT_EQ(0UL, share_target_dict->size()); } } // namespace
null
null
null
null
55,159
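The tests in the record above pin down the contract of UpdateShareTargetInPrefs(): the pref is a dictionary keyed by manifest URL, each entry holding a url_template string; re-adding the same target is a no-op, a new template overwrites the stored one, and a manifest without a share target removes the entry. A minimal standalone sketch of that upsert/remove behaviour, using std::map in place of the pref store and invented names (this is not the Chromium prefs API):

#include <cassert>
#include <map>
#include <optional>
#include <string>

// Hypothetical stand-in for the kWebShareVisitedTargets dictionary pref:
// manifest URL -> { "url_template" -> template string }.
using TargetDict = std::map<std::string, std::map<std::string, std::string>>;

// Mirrors the behaviour the tests above assert for UpdateShareTargetInPrefs():
// upsert when the manifest carries a share target, erase when it does not.
void UpdateShareTarget(TargetDict& dict, const std::string& manifest_url,
                       const std::optional<std::string>& url_template) {
  if (!url_template) {
    dict.erase(manifest_url);  // manifest no longer declares a share target
    return;
  }
  dict[manifest_url]["url_template"] = *url_template;  // add or overwrite
}

int main() {
  TargetDict dict;
  const std::string manifest = "https://www.sharetarget.com/manifest.json";

  // Adding the same target twice leaves exactly one entry.
  UpdateShareTarget(dict, manifest,
                    "https://www.sharetarget.com/share/?title={title}");
  UpdateShareTarget(dict, manifest,
                    "https://www.sharetarget.com/share/?title={title}");
  assert(dict.size() == 1);

  // A new template overwrites the stored one.
  UpdateShareTarget(dict, manifest,
                    "https://www.sharetarget.com/share/?title={title}&text={text}");
  assert(dict[manifest]["url_template"].find("{text}") != std::string::npos);

  // A manifest without a share target removes the entry.
  UpdateShareTarget(dict, manifest, std::nullopt);
  assert(dict.empty());
  return 0;
}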
20,383
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
185,378
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. * * The full GNU General Public License is included in this distribution in * the file called "COPYING". * * Contact Information: * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * ******************************************************************************/ #ifndef _I40E_CLIENT_H_ #define _I40E_CLIENT_H_ #define I40E_CLIENT_STR_LENGTH 10 /* Client interface version should be updated anytime there is a change in the * existing APIs or data structures. */ #define I40E_CLIENT_VERSION_MAJOR 0 #define I40E_CLIENT_VERSION_MINOR 01 #define I40E_CLIENT_VERSION_BUILD 00 #define I40E_CLIENT_VERSION_STR \ __stringify(I40E_CLIENT_VERSION_MAJOR) "." \ __stringify(I40E_CLIENT_VERSION_MINOR) "." \ __stringify(I40E_CLIENT_VERSION_BUILD) struct i40e_client_version { u8 major; u8 minor; u8 build; u8 rsvd; }; enum i40e_client_state { __I40E_CLIENT_NULL, __I40E_CLIENT_REGISTERED }; enum i40e_client_instance_state { __I40E_CLIENT_INSTANCE_NONE, __I40E_CLIENT_INSTANCE_OPENED, }; enum i40e_client_type { I40E_CLIENT_IWARP, I40E_CLIENT_VMDQ2 }; struct i40e_ops; struct i40e_client; /* HW does not define a type value for AEQ; only for RX/TX and CEQ. * In order for us to keep the interface simple, SW will define a * unique type value for AEQ. 
 */
#define I40E_QUEUE_TYPE_PE_AEQ  0x80
#define I40E_QUEUE_INVALID_IDX  0xFFFF

struct i40e_qv_info {
	u32 v_idx; /* msix_vector */
	u16 ceq_idx;
	u16 aeq_idx;
	u8 itr_idx;
};

struct i40e_qvlist_info {
	u32 num_vectors;
	struct i40e_qv_info qv_info[1];
};

#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF

/* set of LAN parameters useful for clients managed by LAN */

/* Struct to hold per priority info */
struct i40e_prio_qos_params {
	u16 qs_handle; /* qs handle for prio */
	u8 tc; /* TC mapped to prio */
	u8 reserved;
};

#define I40E_CLIENT_MAX_USER_PRIORITY	8
/* Struct to hold Client QoS */
struct i40e_qos_params {
	struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
};

struct i40e_params {
	struct i40e_qos_params qos;
	u16 mtu;
};

/* Structure to hold Lan device info for a client device */
struct i40e_info {
	struct i40e_client_version version;
	u8 lanmac[6];
	struct net_device *netdev;
	struct pci_dev *pcidev;
	u8 __iomem *hw_addr;
	u8 fid;	/* function id, PF id or VF id */
#define I40E_CLIENT_FTYPE_PF 0
#define I40E_CLIENT_FTYPE_VF 1
	u8 ftype; /* function type, PF or VF */
	void *pf;

	/* All L2 params that could change during the life span of the PF
	 * and need to be communicated to the client when they change
	 */
	struct i40e_qvlist_info *qvlist_info;
	struct i40e_params params;
	struct i40e_ops *ops;

	u16 msix_count;	/* number of msix vectors */
	/* Array down below will be dynamically allocated based on msix_count */
	struct msix_entry *msix_entries;
	u16 itr_index; /* Which ITR index the PE driver is supposed to use */
	u16 fw_maj_ver; /* firmware major version */
	u16 fw_min_ver; /* firmware minor version */
	u32 fw_build;   /* firmware build number */
};

#define I40E_CLIENT_RESET_LEVEL_PF   1
#define I40E_CLIENT_RESET_LEVEL_CORE 2
#define I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE	BIT(1)

struct i40e_ops {
	/* setup_q_vector_list enables queues with a particular vector */
	int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
			    struct i40e_qvlist_info *qv_info);

	int (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
			     u32 vf_id, u8 *msg, u16 len);

	/* If the PE Engine is unresponsive, the RDMA driver can request a
	 * reset. The level helps determine the level of reset being requested.
	 */
	void (*request_reset)(struct i40e_info *ldev,
			      struct i40e_client *client, u32 level);

	/* API for the RDMA driver to set certain VSI flags that control
	 * the PE Engine.
	 */
	int (*update_vsi_ctxt)(struct i40e_info *ldev,
			       struct i40e_client *client,
			       bool is_vf, u32 vf_id,
			       u32 flag, u32 valid_flag);
};

struct i40e_client_ops {
	/* Should be called from register_client() or whenever the PF is ready
	 * to create a specific client instance.
	 */
	int (*open)(struct i40e_info *ldev, struct i40e_client *client);

	/* Should be called when the netdev is unavailable or when the
	 * unregister call comes in. If the close is happening due to a reset
	 * being triggered, set the reset bit to true.
	 */
	void (*close)(struct i40e_info *ldev, struct i40e_client *client,
		      bool reset);

	/* called when some l2 managed parameter changes - mtu */
	void (*l2_param_change)(struct i40e_info *ldev,
				struct i40e_client *client,
				struct i40e_params *params);

	int (*virtchnl_receive)(struct i40e_info *ldev,
				struct i40e_client *client, u32 vf_id,
				u8 *msg, u16 len);

	/* called when a VF is reset by the PF */
	void (*vf_reset)(struct i40e_info *ldev,
			 struct i40e_client *client, u32 vf_id);

	/* called when the number of VFs changes */
	void (*vf_enable)(struct i40e_info *ldev,
			  struct i40e_client *client, u32 num_vfs);

	/* returns true if the VF is capable of the specified offload */
	int (*vf_capable)(struct i40e_info *ldev,
			  struct i40e_client *client, u32 vf_id);
};

/* Client device */
struct i40e_client_instance {
	struct list_head list;
	struct i40e_info lan_info;
	struct i40e_client *client;
	unsigned long state;
};

struct i40e_client {
	struct list_head list;		/* list of registered clients */
	char name[I40E_CLIENT_STR_LENGTH];
	struct i40e_client_version version;
	unsigned long state;		/* client state */
	atomic_t ref_cnt;	/* Count of all the client devices of this kind */
	u32 flags;
#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE	BIT(0)
#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS	BIT(2)
	enum i40e_client_type type;
	const struct i40e_client_ops *ops; /* client ops provided by the client */
};

static inline bool i40e_client_is_registered(struct i40e_client *client)
{
	return test_bit(__I40E_CLIENT_REGISTERED, &client->state);
}

/* used by clients */
int i40e_register_client(struct i40e_client *client);
int i40e_unregister_client(struct i40e_client *client);
#endif /* _I40E_CLIENT_H_ */
null
null
null
null
93,725
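The i40e_client.h record above defines the contract between the LAN driver and an RDMA client: the client fills a table of callbacks (open/close/l2_param_change/...) and hands it to i40e_register_client(), after which the driver calls back through that table at lifecycle points. A user-space C++ analogue of the same function-pointer ops-table pattern, with invented names and a deliberately simplified lifecycle (when the real driver invokes open() depends on device state, which this sketch does not model):

#include <cstdio>
#include <vector>

// Invented analogue of struct i40e_client_ops: callbacks the "driver"
// invokes on the client at well-defined lifecycle points.
struct ClientOps {
  int (*open)(const char* client_name);
  void (*close)(const char* client_name, bool reset);
};

struct Client {
  const char* name;
  const ClientOps* ops;  // provided and owned by the client
};

static std::vector<Client*> g_clients;  // stand-in for the driver's client list

// Analogue of i40e_register_client(): remember the client and open it when
// the device is ready (here: immediately, as a simplification).
int RegisterClient(Client* c) {
  g_clients.push_back(c);
  return c->ops->open(c->name);
}

// Analogue of i40e_unregister_client(): close without a reset in flight.
void UnregisterClient(Client* c) {
  c->ops->close(c->name, /*reset=*/false);
  // A fuller implementation would also remove c from g_clients.
}

static int MyOpen(const char* name) {
  std::printf("%s: open\n", name);
  return 0;
}
static void MyClose(const char* name, bool reset) {
  std::printf("%s: close (reset=%d)\n", name, reset);
}

int main() {
  static const ClientOps ops = {MyOpen, MyClose};
  Client rdma{"rdma-client", &ops};
  RegisterClient(&rdma);
  UnregisterClient(&rdma);
  return 0;
}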
53,323
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
53,323
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <stdint.h> #include "media/filters/h264_bitstream_buffer.h" #include "testing/gtest/include/gtest/gtest.h" namespace media { namespace { const uint64_t kTestPattern = 0xfedcba0987654321; } class H264BitstreamBufferAppendBitsTest : public ::testing::TestWithParam<uint64_t> {}; // TODO(posciak): More tests! TEST_P(H264BitstreamBufferAppendBitsTest, AppendAndVerifyBits) { H264BitstreamBuffer b; uint64_t num_bits = GetParam(); // TODO(posciak): Tests for >64 bits. ASSERT_LE(num_bits, 64u); uint64_t num_bytes = (num_bits + 7) / 8; b.AppendBits(num_bits, kTestPattern); b.FlushReg(); EXPECT_EQ(b.BytesInBuffer(), num_bytes); uint8_t* ptr = b.data(); uint64_t got = 0; uint64_t expected = kTestPattern; if (num_bits < 64) expected &= ((1ull << num_bits) - 1); while (num_bits > 8) { got |= (*ptr & 0xff); num_bits -= 8; got <<= (num_bits > 8 ? 8 : num_bits); ptr++; } if (num_bits > 0) { uint64_t temp = (*ptr & 0xff); temp >>= (8 - num_bits); got |= temp; } EXPECT_EQ(got, expected) << std::hex << "0x" << got << " vs 0x" << expected; } INSTANTIATE_TEST_CASE_P(AppendNumBits, H264BitstreamBufferAppendBitsTest, ::testing::Range(static_cast<uint64_t>(1), static_cast<uint64_t>(65))); } // namespace media
null
null
null
null
50,186
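The H264BitstreamBuffer test above verifies AppendBits() by re-reading the buffer byte by byte, most-significant bit first, and comparing against the masked test pattern. The same round trip can be shown on a plain byte vector; a sketch with hypothetical helpers, not the media:: API:

#include <cassert>
#include <cstdint>
#include <vector>

// Append the low |num_bits| of |value| to |buf|, most-significant bit first,
// tracking the write position in |bit_pos| (measured in bits, not bytes).
void AppendBitsMsbFirst(std::vector<uint8_t>& buf, size_t& bit_pos,
                        uint64_t value, unsigned num_bits) {
  for (unsigned i = num_bits; i > 0; --i) {
    if (bit_pos % 8 == 0) buf.push_back(0);        // start a new byte
    unsigned bit = (value >> (i - 1)) & 1;          // next bit, MSB first
    buf[bit_pos / 8] |= bit << (7 - bit_pos % 8);   // high bits fill first
    ++bit_pos;
  }
}

// Read |num_bits| back, MSB first, reversing the layout above.
uint64_t ReadBitsMsbFirst(const std::vector<uint8_t>& buf, unsigned num_bits) {
  uint64_t value = 0;
  for (unsigned pos = 0; pos < num_bits; ++pos) {
    unsigned bit = (buf[pos / 8] >> (7 - pos % 8)) & 1;
    value = (value << 1) | bit;
  }
  return value;
}

int main() {
  const uint64_t kTestPattern = 0xfedcba0987654321;  // same pattern as the test
  for (unsigned num_bits = 1; num_bits <= 64; ++num_bits) {
    std::vector<uint8_t> buf;
    size_t bit_pos = 0;
    AppendBitsMsbFirst(buf, bit_pos, kTestPattern, num_bits);
    assert(buf.size() == (num_bits + 7) / 8);  // mirrors the BytesInBuffer check
    uint64_t expected = kTestPattern;
    if (num_bits < 64) expected &= (1ull << num_bits) - 1;  // mask like the test
    assert(ReadBitsMsbFirst(buf, num_bits) == expected);
  }
  return 0;
}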
39,371
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
39,371
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/* * Copyright (C) 2013 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "third_party/blink/renderer/modules/serviceworkers/service_worker.h" #include <memory> #include "third_party/blink/public/mojom/service_worker/service_worker_state.mojom-blink.h" #include "third_party/blink/public/platform/web_string.h" #include "third_party/blink/renderer/bindings/core/v8/callback_promise_adapter.h" #include "third_party/blink/renderer/bindings/core/v8/exception_state.h" #include "third_party/blink/renderer/core/dom/events/event.h" #include "third_party/blink/renderer/core/dom/exception_code.h" #include "third_party/blink/renderer/core/execution_context/execution_context.h" #include "third_party/blink/renderer/core/messaging/blink_transferable_message.h" #include "third_party/blink/renderer/core/messaging/message_port.h" #include "third_party/blink/renderer/modules/event_target_modules.h" #include "third_party/blink/renderer/modules/serviceworkers/service_worker_container_client.h" #include "third_party/blink/renderer/platform/bindings/script_state.h" namespace blink { const AtomicString& ServiceWorker::InterfaceName() const { return EventTargetNames::ServiceWorker; } void ServiceWorker::postMessage(ScriptState* script_state, scoped_refptr<SerializedScriptValue> message, const MessagePortArray& ports, ExceptionState& exception_state) { ServiceWorkerContainerClient* client = ServiceWorkerContainerClient::From(GetExecutionContext()); if (!client || !client->Provider()) { exception_state.ThrowDOMException( kInvalidStateError, "Failed to post a message: No associated provider is available."); return; } BlinkTransferableMessage msg; msg.message = message; msg.ports = MessagePort::DisentanglePorts( ExecutionContext::From(script_state), ports, exception_state); if (exception_state.HadException()) return; if (handle_->ServiceWorker()->GetState() == mojom::blink::ServiceWorkerState::kRedundant) { exception_state.ThrowDOMException(kInvalidStateError, "ServiceWorker is in redundant state."); return; } handle_->ServiceWorker()->PostMessageToServiceWorker( 
ToTransferableMessage(std::move(msg))); } ScriptPromise ServiceWorker::InternalsTerminate(ScriptState* script_state) { ScriptPromiseResolver* resolver = ScriptPromiseResolver::Create(script_state); ScriptPromise promise = resolver->Promise(); handle_->ServiceWorker()->TerminateForTesting( std::make_unique<CallbackPromiseAdapter<void, void>>(resolver)); return promise; } void ServiceWorker::DispatchStateChangeEvent() { this->DispatchEvent(Event::Create(EventTypeNames::statechange)); } String ServiceWorker::scriptURL() const { return handle_->ServiceWorker()->Url().GetString(); } String ServiceWorker::state() const { switch (handle_->ServiceWorker()->GetState()) { case mojom::blink::ServiceWorkerState::kUnknown: // The web platform should never see this internal state NOTREACHED(); return "unknown"; case mojom::blink::ServiceWorkerState::kInstalling: return "installing"; case mojom::blink::ServiceWorkerState::kInstalled: return "installed"; case mojom::blink::ServiceWorkerState::kActivating: return "activating"; case mojom::blink::ServiceWorkerState::kActivated: return "activated"; case mojom::blink::ServiceWorkerState::kRedundant: return "redundant"; } NOTREACHED(); return g_null_atom; } ServiceWorker* ServiceWorker::From( ExecutionContext* execution_context, std::unique_ptr<WebServiceWorker::Handle> handle) { return GetOrCreate(execution_context, std::move(handle)); } bool ServiceWorker::HasPendingActivity() const { if (was_stopped_) return false; return handle_->ServiceWorker()->GetState() != mojom::blink::ServiceWorkerState::kRedundant; } void ServiceWorker::ContextDestroyed(ExecutionContext*) { was_stopped_ = true; } ServiceWorker* ServiceWorker::GetOrCreate( ExecutionContext* execution_context, std::unique_ptr<WebServiceWorker::Handle> handle) { if (!handle) return nullptr; ServiceWorker* existing_worker = static_cast<ServiceWorker*>(handle->ServiceWorker()->Proxy()); if (existing_worker) { DCHECK_EQ(existing_worker->GetExecutionContext(), execution_context); return existing_worker; } return new ServiceWorker(execution_context, std::move(handle)); } ServiceWorker::ServiceWorker(ExecutionContext* execution_context, std::unique_ptr<WebServiceWorker::Handle> handle) : AbstractWorker(execution_context), handle_(std::move(handle)), was_stopped_(false) { DCHECK(handle_); handle_->ServiceWorker()->SetProxy(this); } ServiceWorker::~ServiceWorker() = default; void ServiceWorker::Trace(blink::Visitor* visitor) { AbstractWorker::Trace(visitor); } } // namespace blink
null
null
null
null
36,234
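ServiceWorker::GetOrCreate() in the record above shows a handle/proxy caching pattern: the lower-level handle stores a back-pointer to the wrapper exposed to script, so a second lookup returns the existing wrapper instead of minting a duplicate. A stripped-down sketch of the same idea with invented types (Blink's real objects are garbage-collected; the manual delete here is only to keep the sketch self-contained):

#include <cassert>

class Proxy;  // forward declaration: the wrapper handed out to callers

// Invented analogue of WebServiceWorker: the underlying object remembers
// which wrapper (if any) already represents it.
class Handle {
 public:
  void SetProxy(Proxy* p) { proxy_ = p; }
  Proxy* proxy() const { return proxy_; }

 private:
  Proxy* proxy_ = nullptr;
};

class Proxy {
 public:
  explicit Proxy(Handle* h) : handle_(h) { h->SetProxy(this); }
  Handle* handle() const { return handle_; }

 private:
  Handle* handle_;
};

// Analogue of ServiceWorker::GetOrCreate(): reuse the existing wrapper when
// one is already registered on the handle.
Proxy* GetOrCreate(Handle* h) {
  if (Proxy* existing = h->proxy()) return existing;
  return new Proxy(h);
}

int main() {
  Handle handle;
  Proxy* first = GetOrCreate(&handle);
  Proxy* second = GetOrCreate(&handle);
  assert(first == second);  // one wrapper per handle, as in the real code
  delete first;
  return 0;
}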
18,570
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
18,570
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_SUBRESOURCE_FILTER_CORE_COMMON_ACTIVATION_DECISION_H_ #define COMPONENTS_SUBRESOURCE_FILTER_CORE_COMMON_ACTIVATION_DECISION_H_ namespace subresource_filter { // NOTE: ActivationDecision backs a UMA histogram, so it is append-only. enum class ActivationDecision : int { // The activation decision is unknown, or not known yet. UNKNOWN, // Subresource filtering was activated. ACTIVATED, // Did not activate because subresource filtering was disabled by the // highest priority configuration whose activation conditions were met. ACTIVATION_DISABLED, // Did not activate because the main frame document URL had an unsupported // scheme. UNSUPPORTED_SCHEME, // Did not activate because although there was a configuration whose // activation conditions were met, the main frame URL was whitelisted. URL_WHITELISTED, // Did not activate because the main frame document URL did not match the // activation conditions of any of enabled configurations. ACTIVATION_CONDITIONS_NOT_MET, // Max value for enum. ACTIVATION_DECISION_MAX }; } // namespace subresource_filter #endif // COMPONENTS_SUBRESOURCE_FILTER_CORE_COMMON_ACTIVATION_DECISION_H_
null
null
null
null
15,433
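The comment in activation_decision.h notes that the enum backs a UMA histogram and is therefore append-only: recorded samples are stored as raw integers, so renumbering existing values would silently relabel historical data, and the trailing MAX value sizes the histogram's bucket range. A small sketch of that convention with a hypothetical RecordEnum() standing in for the UMA macros (the k-prefixed names here are invented, not the names in the header):

#include <cstdio>

// Mirrors the append-only convention: new values go before kMaxValue, and
// existing values never change, because logged samples are raw integers.
enum class Decision : int {
  kUnknown,
  kActivated,
  kActivationDisabled,
  kUnsupportedScheme,
  kUrlWhitelisted,
  kActivationConditionsNotMet,
  kMaxValue,  // must stay last: it is the exclusive bound of valid samples
};

// Hypothetical stand-in for an enumeration-histogram recorder: the sentinel
// supplies the upper bound used to validate and bucket samples.
void RecordEnum(const char* name, Decision sample) {
  int value = static_cast<int>(sample);
  int boundary = static_cast<int>(Decision::kMaxValue);
  if (value < 0 || value >= boundary) {
    std::printf("%s: dropped out-of-range sample %d\n", name, value);
    return;
  }
  std::printf("%s: recorded bucket %d of %d\n", name, value, boundary);
}

int main() {
  RecordEnum("SubresourceFilter.ActivationDecision", Decision::kActivated);
  return 0;
}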
24,783
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
189,778
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include <subdev/bios.h> #include <subdev/bios/dcb.h> #include <subdev/bios/conn.h> u32 nvbios_connTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) { u32 dcb = dcb_table(bios, ver, hdr, cnt, len); if (dcb && *ver >= 0x30 && *hdr >= 0x16) { u32 data = nvbios_rd16(bios, dcb + 0x14); if (data) { *ver = nvbios_rd08(bios, data + 0); *hdr = nvbios_rd08(bios, data + 1); *cnt = nvbios_rd08(bios, data + 2); *len = nvbios_rd08(bios, data + 3); return data; } } return 0x00000000; } u32 nvbios_connTp(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_connT *info) { u32 data = nvbios_connTe(bios, ver, hdr, cnt, len); memset(info, 0x00, sizeof(*info)); switch (!!data * *ver) { case 0x30: case 0x40: return data; default: break; } return 0x00000000; } u32 nvbios_connEe(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len) { u8 hdr, cnt; u32 data = nvbios_connTe(bios, ver, &hdr, &cnt, len); if (data && idx < cnt) return data + hdr + (idx * *len); return 0x00000000; } u32 nvbios_connEp(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len, struct nvbios_connE *info) { u32 data = nvbios_connEe(bios, idx, ver, len); memset(info, 0x00, sizeof(*info)); switch (!!data * *ver) { case 0x30: case 0x40: info->type = nvbios_rd08(bios, data + 0x00); info->location = nvbios_rd08(bios, data + 0x01) & 0x0f; info->hpd = (nvbios_rd08(bios, data + 0x01) & 0x30) >> 4; info->dp = (nvbios_rd08(bios, data + 0x01) & 0xc0) >> 6; if (*len < 4) return data; info->hpd |= (nvbios_rd08(bios, data + 0x02) & 0x03) << 2; info->dp |= nvbios_rd08(bios, data + 0x02) & 0x0c; info->di = (nvbios_rd08(bios, data + 0x02) & 0xf0) >> 4; info->hpd |= (nvbios_rd08(bios, data + 0x03) & 0x07) << 4; info->sr = (nvbios_rd08(bios, data + 0x03) & 0x08) >> 3; info->lcdid = (nvbios_rd08(bios, data + 0x03) & 0x70) >> 4; return data; default: break; } return 0x00000000; }
null
null
null
null
98,125
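nvbios_connEp() in the record above reassembles connector fields (hpd, dp, di, sr, lcdid) from bits packed across up to four BIOS bytes, widening hpd/dp with extra high bits when the entry is long enough. The same unpacking can be demonstrated over a plain byte array; a sketch that copies the masks and shifts from the record into an invented struct, fed with arbitrary example bytes:

#include <cstdint>
#include <cstdio>

// Invented analogue of struct nvbios_connE, holding the unpacked fields.
struct ConnEntry {
  uint8_t type = 0, location = 0, hpd = 0, dp = 0;
  uint8_t di = 0, sr = 0, lcdid = 0;
};

// Unpack a connector entry from |len| raw bytes using the same masks and
// shifts as nvbios_connEp(); bytes 2-3 contribute extra hpd/dp bits.
ConnEntry ParseConnEntry(const uint8_t* b, unsigned len) {
  ConnEntry e;
  e.type = b[0];
  e.location = b[1] & 0x0f;
  e.hpd = (b[1] & 0x30) >> 4;
  e.dp = (b[1] & 0xc0) >> 6;
  if (len < 4) return e;  // short entry: only the first two bytes are valid
  e.hpd |= (b[2] & 0x03) << 2;
  e.dp |= b[2] & 0x0c;
  e.di = (b[2] & 0xf0) >> 4;
  e.hpd |= (b[3] & 0x07) << 4;
  e.sr = (b[3] & 0x08) >> 3;
  e.lcdid = (b[3] & 0x70) >> 4;
  return e;
}

int main() {
  // Arbitrary example bytes, not taken from any real VBIOS image.
  const uint8_t entry[4] = {0x46, 0xd1, 0xa7, 0x5b};
  ConnEntry e = ParseConnEntry(entry, 4);
  std::printf("type=%02x location=%x hpd=%02x dp=%x di=%x sr=%x lcdid=%x\n",
              e.type, e.location, e.hpd, e.dp, e.di, e.sr, e.lcdid);
  return 0;
}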
38,340
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
203,335
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright 2014, Michael Ellerman, IBM Corp. * Licensed under GPLv2. */ #include <stdio.h> #include <stdlib.h> #include "ebb.h" /* * Test basic access to the EBB regs, they should be user accessible with no * kernel interaction required. */ int reg_access(void) { uint64_t val, expected; SKIP_IF(!ebb_is_supported()); expected = 0x8000000100000000ull; mtspr(SPRN_BESCR, expected); val = mfspr(SPRN_BESCR); FAIL_IF(val != expected); expected = 0x0000000001000000ull; mtspr(SPRN_EBBHR, expected); val = mfspr(SPRN_EBBHR); FAIL_IF(val != expected); return 0; } int main(void) { return test_harness(reg_access, "reg_access"); }
null
null
null
null
111,682
24,673
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
189,668
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include "nv04.h" #include <core/gpuobj.h> #define NV04_PDMA_SIZE (128 * 1024 * 1024) #define NV04_PDMA_PAGE ( 4 * 1024) /******************************************************************************* * VM map/unmap callbacks ******************************************************************************/ static void nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt, struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) { pte = 0x00008 + (pte * 4); nvkm_kmap(pgt); while (cnt) { u32 page = PAGE_SIZE / NV04_PDMA_PAGE; u32 phys = (u32)*list++; while (cnt && page--) { nvkm_wo32(pgt, pte, phys | 3); phys += NV04_PDMA_PAGE; pte += 4; cnt -= 1; } } nvkm_done(pgt); } static void nv04_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt) { pte = 0x00008 + (pte * 4); nvkm_kmap(pgt); while (cnt--) { nvkm_wo32(pgt, pte, 0x00000000); pte += 4; } nvkm_done(pgt); } static void nv04_vm_flush(struct nvkm_vm *vm) { } /******************************************************************************* * MMU subdev ******************************************************************************/ static int nv04_mmu_oneinit(struct nvkm_mmu *base) { struct nv04_mmu *mmu = nv04_mmu(base); struct nvkm_device *device = mmu->base.subdev.device; struct nvkm_memory *dma; int ret; ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL, &mmu->vm); if (ret) return ret; ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8, 16, true, &dma); mmu->vm->pgt[0].mem[0] = dma; mmu->vm->pgt[0].refcount[0] = 1; if (ret) return ret; nvkm_kmap(dma); nvkm_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */ nvkm_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1); nvkm_done(dma); return 0; } void * nv04_mmu_dtor(struct nvkm_mmu *base) { struct nv04_mmu *mmu = nv04_mmu(base); struct nvkm_device *device = mmu->base.subdev.device; if (mmu->vm) { nvkm_memory_del(&mmu->vm->pgt[0].mem[0]); nvkm_vm_ref(NULL, &mmu->vm, NULL); } if (mmu->nullp) { dma_free_coherent(device->dev, 16 * 1024, mmu->nullp, mmu->null); } return mmu; } int nv04_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) { struct nv04_mmu *mmu; if (!(mmu = kzalloc(sizeof(*mmu), GFP_KERNEL))) return -ENOMEM; *pmmu = &mmu->base; nvkm_mmu_ctor(func, device, index, &mmu->base); return 0; } const struct nvkm_mmu_func nv04_mmu = { .oneinit = nv04_mmu_oneinit, 
.dtor = nv04_mmu_dtor, .limit = NV04_PDMA_SIZE, .dma_bits = 32, .pgt_bits = 32 - 12, .spg_shift = 12, .lpg_shift = 12, .map_sg = nv04_vm_map_sg, .unmap = nv04_vm_unmap, .flush = nv04_vm_flush, }; int nv04_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) { return nv04_mmu_new_(&nv04_mmu, device, index, pmmu); }
null
null
null
null
98,015
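nv04_vm_map_sg() in the record above splits each CPU page into 4 KiB GPU pages, writing one 32-bit PTE per chunk with the low bits 0b11 marking the entry valid (the real table also starts at byte offset 0x8, which this sketch omits). A host-side sketch of the same address-splitting loop over a plain uint32_t array, with PAGE_SIZE assumed to be 16 KiB so one scatter-gather entry fans out to four PTEs, and with the simplifying assumption that the PTE count is a whole number of entries:

#include <cassert>
#include <cstdint>
#include <vector>

constexpr uint32_t kGpuPage = 4 * 1024;   // NV04_PDMA_PAGE
constexpr uint32_t kCpuPage = 16 * 1024;  // assumed PAGE_SIZE for the demo
constexpr uint32_t kPteValid = 3;         // low bits set in each written PTE

// Mirror of the nv04_vm_map_sg() inner loop: each DMA address covers
// kCpuPage bytes and expands into kCpuPage / kGpuPage consecutive PTEs.
void MapSg(std::vector<uint32_t>& pgt, uint32_t first_pte,
           const std::vector<uint32_t>& dma_addrs) {
  uint32_t pte = first_pte;
  for (uint32_t phys : dma_addrs) {
    for (uint32_t i = 0; i < kCpuPage / kGpuPage; ++i) {
      pgt.at(pte++) = phys | kPteValid;  // mark the entry valid
      phys += kGpuPage;                  // next 4 KiB GPU page
    }
  }
}

int main() {
  std::vector<uint32_t> pgt(64, 0);  // toy page table
  MapSg(pgt, /*first_pte=*/0, {0x10000000, 0x20000000});
  // Two 16 KiB entries -> eight PTEs, each 4 KiB apart, each tagged valid.
  assert(pgt[0] == (0x10000000 | kPteValid));
  assert(pgt[3] == (0x10003000 | kPteValid));
  assert(pgt[4] == (0x20000000 | kPteValid));
  assert(pgt[7] == (0x20003000 | kPteValid));
  assert(pgt[8] == 0);
  return 0;
}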
25,207
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
25,207
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef EXTENSIONS_COMMON_PERMISSIONS_PERMISSION_MESSAGE_UTIL_H_ #define EXTENSIONS_COMMON_PERMISSIONS_PERMISSION_MESSAGE_UTIL_H_ #include <set> #include <string> namespace extensions { class URLPatternSet; } namespace permission_message_util { std::set<std::string> GetDistinctHosts( const extensions::URLPatternSet& host_patterns, bool include_rcd, bool exclude_file_scheme); } // namespace permission_message_util #endif // EXTENSIONS_COMMON_PERMISSIONS_PERMISSION_MESSAGE_UTIL_H_
null
null
null
null
22,070
19,915
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
19,915
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "content/public/test/test_browser_thread.h" #include "base/macros.h" #include "base/memory/ptr_util.h" #include "base/message_loop/message_loop.h" #include "base/threading/thread.h" #include "build/build_config.h" #include "content/browser/browser_process_sub_thread.h" #include "content/browser/browser_thread_impl.h" namespace content { TestBrowserThread::TestBrowserThread(BrowserThread::ID identifier) : identifier_(identifier), real_thread_(std::make_unique<BrowserProcessSubThread>(identifier_)) { real_thread_->AllowBlockingForTesting(); } TestBrowserThread::TestBrowserThread(BrowserThread::ID identifier, base::MessageLoop* message_loop) : identifier_(identifier), fake_thread_( new BrowserThreadImpl(identifier_, message_loop->task_runner())) {} TestBrowserThread::~TestBrowserThread() { // The upcoming BrowserThreadImpl::ResetGlobalsForTesting() call requires that // |identifier_| have completed its SHUTDOWN phase. real_thread_.reset(); fake_thread_.reset(); // Resets BrowserThreadImpl's globals so that |identifier_| is no longer // bound. This is fine since the underlying MessageLoop has already been // flushed and deleted above. In the case of an externally provided // MessageLoop however, this means that TaskRunners obtained through // |BrowserThreadImpl::GetTaskRunnerForThread(identifier_)| will no longer // recognize their BrowserThreadImpl for RunsTasksInCurrentSequence(). This // happens most often when such verifications are made from // MessageLoop::DestructionObservers. Callers that care to work around that // should instead use this shutdown sequence: // 1) TestBrowserThread::Stop() // 2) ~MessageLoop() // 3) ~TestBrowserThread() // (~TestBrowserThreadBundle() does this). BrowserThreadImpl::ResetGlobalsForTesting(identifier_); } void TestBrowserThread::Start() { CHECK(real_thread_->Start()); RegisterAsBrowserThread(); } void TestBrowserThread::StartAndWaitForTesting() { CHECK(real_thread_->StartAndWaitForTesting()); RegisterAsBrowserThread(); } void TestBrowserThread::StartIOThread() { StartIOThreadUnregistered(); RegisterAsBrowserThread(); } void TestBrowserThread::StartIOThreadUnregistered() { base::Thread::Options options; options.message_loop_type = base::MessageLoop::TYPE_IO; CHECK(real_thread_->StartWithOptions(options)); } void TestBrowserThread::RegisterAsBrowserThread() { real_thread_->RegisterAsBrowserThread(); } void TestBrowserThread::Stop() { if (real_thread_) real_thread_->Stop(); } } // namespace content
null
null
null
null
16,778
970
2,3,5
train_val
116d0963cadfbf55ef2ec3d13781987c4d80517a
970
Chrome
1
https://github.com/chromium/chromium
2012-08-24 23:22:28+00:00
int GetAvailableDraftPageCount() { int page_data_map_size = page_data_map_.size(); if (page_data_map_.find(printing::COMPLETE_PREVIEW_DOCUMENT_INDEX) != page_data_map_.end()) { page_data_map_size--; } return page_data_map_size; }
CVE-2012-2891
CWE-200
https://github.com/chromium/chromium/commit/116d0963cadfbf55ef2ec3d13781987c4d80517a
Low
970
32,394
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
32,394
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "build/build_config.h"
#include "third_party/blink/public/platform/platform.h"
#include "third_party/blink/public/platform/scheduler/test/renderer_scheduler_test_support.h"
#include "third_party/blink/public/platform/task_type.h"
#include "third_party/blink/public/web/web_local_frame.h"
#include "third_party/blink/public/web/web_script_execution_callback.h"
#include "third_party/blink/public/web/web_script_source.h"
#include "third_party/blink/public/web/web_view.h"
#include "third_party/blink/renderer/core/testing/sim/sim_request.h"
#include "third_party/blink/renderer/core/testing/sim/sim_test.h"
#include "third_party/blink/renderer/platform/scheduler/public/page_scheduler.h"
#include "third_party/blink/renderer/platform/testing/unit_test_helpers.h"

namespace blink {
namespace virtual_time_test {

class ScriptExecutionCallbackHelper : public WebScriptExecutionCallback {
 public:
  const String Result() const { return result_; }

 private:
  void Completed(const WebVector<v8::Local<v8::Value>>& values) override {
    if (!values.IsEmpty() && !values[0].IsEmpty() && values[0]->IsString()) {
      result_ = ToCoreString(v8::Local<v8::String>::Cast(values[0]));
    }
  }

  String result_;
};

class VirtualTimeTest : public SimTest {
 protected:
  void SetUp() override {
    SimTest::SetUp();
    WebView().Scheduler()->EnableVirtualTime();
  }

  String ExecuteJavaScript(String script_source) {
    ScriptExecutionCallbackHelper callback_helper;
    WebView()
        .MainFrame()
        ->ToWebLocalFrame()
        ->RequestExecuteScriptAndReturnValue(
            WebScriptSource(WebString(script_source)), false,
            &callback_helper);
    return callback_helper.Result();
  }

  void TearDown() override {
    // The SimTest destructor calls runPendingTasks. This is a problem because
    // if there are any repeating tasks, advancing virtual time will cause the
    // runloop to busy loop. Disabling virtual time here fixes that.
    WebView().Scheduler()->DisableVirtualTimeForTesting();
  }

  void StopVirtualTimeAndExitRunLoop() {
    WebView().Scheduler()->SetVirtualTimePolicy(
        PageScheduler::VirtualTimePolicy::kPause);
    test::ExitRunLoop();
  }

  // Some task queues may have repeating v8 tasks that run forever, so we
  // impose a hard (virtual) time limit.
  void RunTasksForPeriod(double delay_ms) {
    scheduler::GetSingleThreadTaskRunnerForTesting()->PostDelayedTask(
        FROM_HERE,
        WTF::Bind(&VirtualTimeTest::StopVirtualTimeAndExitRunLoop,
                  WTF::Unretained(this)),
        TimeDelta::FromMillisecondsD(delay_ms));
    test::EnterRunLoop();
  }
};

// http://crbug.com/633321
#if defined(OS_ANDROID)
#define MAYBE_DOMTimersFireInExpectedOrder DISABLED_DOMTimersFireInExpectedOrder
#else
#define MAYBE_DOMTimersFireInExpectedOrder DOMTimersFireInExpectedOrder
#endif
TEST_F(VirtualTimeTest, MAYBE_DOMTimersFireInExpectedOrder) {
  WebView().Scheduler()->EnableVirtualTime();
  WebView().Scheduler()->SetVirtualTimePolicy(
      PageScheduler::VirtualTimePolicy::kAdvance);

  ExecuteJavaScript(
      "var run_order = [];"
      "function timerFn(delay, value) {"
      "  setTimeout(function() { run_order.push(value); }, delay);"
      "};"
      "var one_minute = 60 * 1000;"
      "timerFn(one_minute * 4, 'a');"
      "timerFn(one_minute * 2, 'b');"
      "timerFn(one_minute, 'c');");

  // Normally the JS runs pretty much instantly, but the timer callbacks would
  // take 4 minutes to fire; thanks to timer fast forwarding we can make them
  // fire immediately.
RunTasksForPeriod(60 * 1000 * 4); EXPECT_EQ("c, b, a", ExecuteJavaScript("run_order.join(', ')")); } // http://crbug.com/633321 #if defined(OS_ANDROID) #define MAYBE_SetInterval DISABLED_SetInterval #else #define MAYBE_SetInterval SetInterval #endif TEST_F(VirtualTimeTest, MAYBE_SetInterval) { WebView().Scheduler()->EnableVirtualTime(); WebView().Scheduler()->SetVirtualTimePolicy( PageScheduler::VirtualTimePolicy::kAdvance); ExecuteJavaScript( "var run_order = [];" "var count = 10;" "var interval_handle = setInterval(function() {" " if (--window.count == 0) {" " clearInterval(interval_handle);" " }" " run_order.push(count);" "}, 1000);" "setTimeout(function() { run_order.push('timer'); }, 1500);"); RunTasksForPeriod(10001); EXPECT_EQ("9, timer, 8, 7, 6, 5, 4, 3, 2, 1, 0", ExecuteJavaScript("run_order.join(', ')")); } // http://crbug.com/633321 #if defined(OS_ANDROID) #define MAYBE_AllowVirtualTimeToAdvance DISABLED_AllowVirtualTimeToAdvance #else #define MAYBE_AllowVirtualTimeToAdvance AllowVirtualTimeToAdvance #endif TEST_F(VirtualTimeTest, MAYBE_AllowVirtualTimeToAdvance) { WebView().Scheduler()->SetVirtualTimePolicy( PageScheduler::VirtualTimePolicy::kPause); ExecuteJavaScript( "var run_order = [];" "timerFn = function(delay, value) {" " setTimeout(function() { run_order.push(value); }, delay);" "};" "timerFn(100, 'a');" "timerFn(10, 'b');" "timerFn(1, 'c');"); test::RunPendingTasks(); EXPECT_EQ("", ExecuteJavaScript("run_order.join(', ')")); WebView().Scheduler()->SetVirtualTimePolicy( PageScheduler::VirtualTimePolicy::kAdvance); RunTasksForPeriod(1000); EXPECT_EQ("c, b, a", ExecuteJavaScript("run_order.join(', ')")); } // http://crbug.com/633321 #if defined(OS_ANDROID) #define MAYBE_VirtualTimeNotAllowedToAdvanceWhileResourcesLoading \ DISABLED_VirtualTimeNotAllowedToAdvanceWhileResourcesLoading #else #define MAYBE_VirtualTimeNotAllowedToAdvanceWhileResourcesLoading \ VirtualTimeNotAllowedToAdvanceWhileResourcesLoading #endif TEST_F(VirtualTimeTest, MAYBE_VirtualTimeNotAllowedToAdvanceWhileResourcesLoading) { WebView().Scheduler()->EnableVirtualTime(); WebView().Scheduler()->SetVirtualTimePolicy( PageScheduler::VirtualTimePolicy::kDeterministicLoading); EXPECT_TRUE(WebView().Scheduler()->VirtualTimeAllowedToAdvance()); SimRequest main_resource("https://example.com/test.html", "text/html"); SimRequest css_resource("https://example.com/test.css", "text/css"); // Loading, virtual time should not advance. LoadURL("https://example.com/test.html"); EXPECT_FALSE(WebView().Scheduler()->VirtualTimeAllowedToAdvance()); main_resource.Start(); // Still Loading, virtual time should not advance. main_resource.Write("<!DOCTYPE html><link rel=stylesheet href=test.css>"); EXPECT_FALSE(WebView().Scheduler()->VirtualTimeAllowedToAdvance()); // Still Loading, virtual time should not advance. css_resource.Start(); css_resource.Write("a { color: red; }"); EXPECT_FALSE(WebView().Scheduler()->VirtualTimeAllowedToAdvance()); // Still Loading, virtual time should not advance. css_resource.Finish(); EXPECT_FALSE(WebView().Scheduler()->VirtualTimeAllowedToAdvance()); // Still Loading, virtual time should not advance. main_resource.Write("<body>"); EXPECT_FALSE(WebView().Scheduler()->VirtualTimeAllowedToAdvance()); // Finished loading, virtual time should be able to advance. 
main_resource.Finish();
  EXPECT_TRUE(WebView().Scheduler()->VirtualTimeAllowedToAdvance());

  // The loading events are delayed for 10 virtual ms after they have run, so
  // we let tasks run for a little while to ensure we don't get any asserts on
  // teardown as a result.
  RunTasksForPeriod(10);
}

// http://crbug.com/633321
#if defined(OS_ANDROID)
#define MAYBE_DOMTimersSuspended DISABLED_DOMTimersSuspended
#else
#define MAYBE_DOMTimersSuspended DOMTimersSuspended
#endif
TEST_F(VirtualTimeTest, MAYBE_DOMTimersSuspended) {
  WebView().Scheduler()->EnableVirtualTime();
  WebView().Scheduler()->SetVirtualTimePolicy(
      PageScheduler::VirtualTimePolicy::kAdvance);

  // Schedule normal DOM timers to run at 1s and 1.001s in the future.
  ExecuteJavaScript(
      "var run_order = [];"
      "setTimeout(() => { run_order.push(1); }, 1000);"
      "setTimeout(() => { run_order.push(2); }, 1001);");

  scoped_refptr<base::SingleThreadTaskRunner> runner =
      Window().GetExecutionContext()->GetTaskRunner(TaskType::kJavascriptTimer);

  // Schedule a task to suspend virtual time at the same point in time.
  runner->PostDelayedTask(FROM_HERE,
                          WTF::Bind(
                              [](PageScheduler* scheduler) {
                                scheduler->SetVirtualTimePolicy(
                                    PageScheduler::VirtualTimePolicy::kPause);
                              },
                              WTF::Unretained(WebView().Scheduler())),
                          TimeDelta::FromMilliseconds(1000));

  // Also schedule a third timer for the same point in time.
  ExecuteJavaScript("setTimeout(() => { run_order.push(2); }, 1000);");

  // The second DOM timer (due at 1001 ms) shouldn't have run because virtual
  // time was paused at 1000 ms.
  test::RunPendingTasks();
  EXPECT_EQ("1, 2", ExecuteJavaScript("run_order.join(', ')"));
}

#undef MAYBE_DOMTimersFireInExpectedOrder
#undef MAYBE_SetInterval
#undef MAYBE_AllowVirtualTimeToAdvance
#undef MAYBE_VirtualTimeNotAllowedToAdvanceWhileResourcesLoading
#undef MAYBE_DOMTimersSuspended

}  // namespace virtual_time_test
}  // namespace blink
null
null
null
null
29,257
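The virtual-time tests in the record above lean on RunTasksForPeriod(): post a task that flips the policy to kPause after a virtual delay, then let the scheduler fast-forward through every due timer in order without any real waiting. The core mechanism, a clock that jumps to the next timer's deadline instead of sleeping, can be sketched with a priority queue; hypothetical types, not the Blink scheduler:

#include <cassert>
#include <cstdint>
#include <functional>
#include <queue>
#include <string>
#include <vector>

// A toy virtual-time task queue: Run() advances the clock straight to each
// task's deadline (no real sleeping) until a time budget is exhausted.
class VirtualTimeQueue {
 public:
  void PostDelayed(uint64_t delay_ms, std::function<void()> task) {
    tasks_.push({now_ms_ + delay_ms, seq_++, std::move(task)});
  }

  // Analogue of RunTasksForPeriod(): fast-forward for |budget_ms| virtual ms.
  void Run(uint64_t budget_ms) {
    const uint64_t deadline = now_ms_ + budget_ms;
    while (!tasks_.empty() && tasks_.top().due_ms <= deadline) {
      Entry e = tasks_.top();
      tasks_.pop();
      now_ms_ = e.due_ms;  // the "fast forward": jump, don't sleep
      e.task();
    }
    now_ms_ = deadline;
  }

 private:
  struct Entry {
    uint64_t due_ms;
    uint64_t seq;  // tie-breaker: FIFO among tasks due at the same time
    std::function<void()> task;
    bool operator>(const Entry& o) const {
      return due_ms != o.due_ms ? due_ms > o.due_ms : seq > o.seq;
    }
  };
  std::priority_queue<Entry, std::vector<Entry>, std::greater<Entry>> tasks_;
  uint64_t now_ms_ = 0;
  uint64_t seq_ = 0;
};

int main() {
  // Same shape as DOMTimersFireInExpectedOrder: longest timer posted first.
  VirtualTimeQueue q;
  std::string run_order;
  const uint64_t kMinute = 60 * 1000;
  q.PostDelayed(4 * kMinute, [&] { run_order += "a"; });
  q.PostDelayed(2 * kMinute, [&] { run_order += "b"; });
  q.PostDelayed(1 * kMinute, [&] { run_order += "c"; });
  q.Run(4 * kMinute);  // completes instantly in real time
  assert(run_order == "cba");
  return 0;
}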
39,943
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
204,938
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/*
 * security/tomoyo/tomoyo.c
 *
 * Copyright (C) 2005-2011  NTT DATA CORPORATION
 */

#include <linux/lsm_hooks.h>
#include "common.h"

/**
 * tomoyo_cred_alloc_blank - Target for security_cred_alloc_blank().
 *
 * @new: Pointer to "struct cred".
 * @gfp: Memory allocation flags.
 *
 * Returns 0.
 */
static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp)
{
	new->security = NULL;
	return 0;
}

/**
 * tomoyo_cred_prepare - Target for security_prepare_creds().
 *
 * @new: Pointer to "struct cred".
 * @old: Pointer to "struct cred".
 * @gfp: Memory allocation flags.
 *
 * Returns 0.
 */
static int tomoyo_cred_prepare(struct cred *new, const struct cred *old,
			       gfp_t gfp)
{
	struct tomoyo_domain_info *domain = old->security;
	new->security = domain;
	if (domain)
		atomic_inc(&domain->users);
	return 0;
}

/**
 * tomoyo_cred_transfer - Target for security_transfer_creds().
 *
 * @new: Pointer to "struct cred".
 * @old: Pointer to "struct cred".
 */
static void tomoyo_cred_transfer(struct cred *new, const struct cred *old)
{
	tomoyo_cred_prepare(new, old, 0);
}

/**
 * tomoyo_cred_free - Target for security_cred_free().
 *
 * @cred: Pointer to "struct cred".
 */
static void tomoyo_cred_free(struct cred *cred)
{
	struct tomoyo_domain_info *domain = cred->security;
	if (domain)
		atomic_dec(&domain->users);
}

/**
 * tomoyo_bprm_set_creds - Target for security_bprm_set_creds().
 *
 * @bprm: Pointer to "struct linux_binprm".
 *
 * Returns 0 on success, negative value otherwise.
 */
static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
{
	/*
	 * Do this only if this function is called for the first time of an
	 * execve operation.
	 */
	if (bprm->cred_prepared)
		return 0;
#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
	/*
	 * Load policy if /sbin/tomoyo-init exists and /sbin/init is requested
	 * for the first time.
	 */
	if (!tomoyo_policy_loaded)
		tomoyo_load_policy(bprm->filename);
#endif
	/*
	 * Release reference to "struct tomoyo_domain_info" stored inside
	 * "bprm->cred->security". New reference to "struct tomoyo_domain_info"
	 * stored inside "bprm->cred->security" will be acquired later inside
	 * tomoyo_find_next_domain().
	 */
	atomic_dec(&((struct tomoyo_domain_info *)
		     bprm->cred->security)->users);
	/*
	 * Tell tomoyo_bprm_check_security() that it is called for the first
	 * time of an execve operation.
	 */
	bprm->cred->security = NULL;
	return 0;
}

/**
 * tomoyo_bprm_check_security - Target for security_bprm_check().
 *
 * @bprm: Pointer to "struct linux_binprm".
 *
 * Returns 0 on success, negative value otherwise.
 */
static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
{
	struct tomoyo_domain_info *domain = bprm->cred->security;

	/*
	 * Execute permission is checked against pathname passed to do_execve()
	 * using current domain.
	 */
	if (!domain) {
		const int idx = tomoyo_read_lock();
		const int err = tomoyo_find_next_domain(bprm);
		tomoyo_read_unlock(idx);
		return err;
	}
	/*
	 * Read permission is checked against interpreters using next domain.
	 */
	return tomoyo_check_open_permission(domain, &bprm->file->f_path,
					    O_RDONLY);
}

/**
 * tomoyo_inode_getattr - Target for security_inode_getattr().
 *
 * @path: Pointer to "struct path".
 *
 * Returns 0 on success, negative value otherwise.
 */
static int tomoyo_inode_getattr(const struct path *path)
{
	return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, path, NULL);
}

/**
 * tomoyo_path_truncate - Target for security_path_truncate().
 *
 * @path: Pointer to "struct path".
 *
 * Returns 0 on success, negative value otherwise.
*/ static int tomoyo_path_truncate(const struct path *path) { return tomoyo_path_perm(TOMOYO_TYPE_TRUNCATE, path, NULL); } /** * tomoyo_path_unlink - Target for security_path_unlink(). * * @parent: Pointer to "struct path". * @dentry: Pointer to "struct dentry". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_unlink(const struct path *parent, struct dentry *dentry) { struct path path = { parent->mnt, dentry }; return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path, NULL); } /** * tomoyo_path_mkdir - Target for security_path_mkdir(). * * @parent: Pointer to "struct path". * @dentry: Pointer to "struct dentry". * @mode: DAC permission mode. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_mkdir(const struct path *parent, struct dentry *dentry, umode_t mode) { struct path path = { parent->mnt, dentry }; return tomoyo_path_number_perm(TOMOYO_TYPE_MKDIR, &path, mode & S_IALLUGO); } /** * tomoyo_path_rmdir - Target for security_path_rmdir(). * * @parent: Pointer to "struct path". * @dentry: Pointer to "struct dentry". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_rmdir(const struct path *parent, struct dentry *dentry) { struct path path = { parent->mnt, dentry }; return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path, NULL); } /** * tomoyo_path_symlink - Target for security_path_symlink(). * * @parent: Pointer to "struct path". * @dentry: Pointer to "struct dentry". * @old_name: Symlink's content. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_symlink(const struct path *parent, struct dentry *dentry, const char *old_name) { struct path path = { parent->mnt, dentry }; return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path, old_name); } /** * tomoyo_path_mknod - Target for security_path_mknod(). * * @parent: Pointer to "struct path". * @dentry: Pointer to "struct dentry". * @mode: DAC permission mode. * @dev: Device attributes. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_mknod(const struct path *parent, struct dentry *dentry, umode_t mode, unsigned int dev) { struct path path = { parent->mnt, dentry }; int type = TOMOYO_TYPE_CREATE; const unsigned int perm = mode & S_IALLUGO; switch (mode & S_IFMT) { case S_IFCHR: type = TOMOYO_TYPE_MKCHAR; break; case S_IFBLK: type = TOMOYO_TYPE_MKBLOCK; break; default: goto no_dev; } return tomoyo_mkdev_perm(type, &path, perm, dev); no_dev: switch (mode & S_IFMT) { case S_IFIFO: type = TOMOYO_TYPE_MKFIFO; break; case S_IFSOCK: type = TOMOYO_TYPE_MKSOCK; break; } return tomoyo_path_number_perm(type, &path, perm); } /** * tomoyo_path_link - Target for security_path_link(). * * @old_dentry: Pointer to "struct dentry". * @new_dir: Pointer to "struct path". * @new_dentry: Pointer to "struct dentry". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_link(struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry) { struct path path1 = { new_dir->mnt, old_dentry }; struct path path2 = { new_dir->mnt, new_dentry }; return tomoyo_path2_perm(TOMOYO_TYPE_LINK, &path1, &path2); } /** * tomoyo_path_rename - Target for security_path_rename(). * * @old_parent: Pointer to "struct path". * @old_dentry: Pointer to "struct dentry". * @new_parent: Pointer to "struct path". * @new_dentry: Pointer to "struct dentry". * * Returns 0 on success, negative value otherwise. 
*/ static int tomoyo_path_rename(const struct path *old_parent, struct dentry *old_dentry, const struct path *new_parent, struct dentry *new_dentry) { struct path path1 = { old_parent->mnt, old_dentry }; struct path path2 = { new_parent->mnt, new_dentry }; return tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path1, &path2); } /** * tomoyo_file_fcntl - Target for security_file_fcntl(). * * @file: Pointer to "struct file". * @cmd: Command for fcntl(). * @arg: Argument for @cmd. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { if (!(cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND))) return 0; return tomoyo_check_open_permission(tomoyo_domain(), &file->f_path, O_WRONLY | (arg & O_APPEND)); } /** * tomoyo_file_open - Target for security_file_open(). * * @f: Pointer to "struct file". * @cred: Pointer to "struct cred". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_file_open(struct file *f, const struct cred *cred) { int flags = f->f_flags; /* Don't check read permission here if called from do_execve(). */ if (current->in_execve) return 0; return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path, flags); } /** * tomoyo_file_ioctl - Target for security_file_ioctl(). * * @file: Pointer to "struct file". * @cmd: Command for ioctl(). * @arg: Argument for @cmd. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return tomoyo_path_number_perm(TOMOYO_TYPE_IOCTL, &file->f_path, cmd); } /** * tomoyo_path_chmod - Target for security_path_chmod(). * * @path: Pointer to "struct path". * @mode: DAC permission mode. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_chmod(const struct path *path, umode_t mode) { return tomoyo_path_number_perm(TOMOYO_TYPE_CHMOD, path, mode & S_IALLUGO); } /** * tomoyo_path_chown - Target for security_path_chown(). * * @path: Pointer to "struct path". * @uid: Owner ID. * @gid: Group ID. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_chown(const struct path *path, kuid_t uid, kgid_t gid) { int error = 0; if (uid_valid(uid)) error = tomoyo_path_number_perm(TOMOYO_TYPE_CHOWN, path, from_kuid(&init_user_ns, uid)); if (!error && gid_valid(gid)) error = tomoyo_path_number_perm(TOMOYO_TYPE_CHGRP, path, from_kgid(&init_user_ns, gid)); return error; } /** * tomoyo_path_chroot - Target for security_path_chroot(). * * @path: Pointer to "struct path". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_chroot(const struct path *path) { return tomoyo_path_perm(TOMOYO_TYPE_CHROOT, path, NULL); } /** * tomoyo_sb_mount - Target for security_sb_mount(). * * @dev_name: Name of device file. Maybe NULL. * @path: Pointer to "struct path". * @type: Name of filesystem type. Maybe NULL. * @flags: Mount options. * @data: Optional data. Maybe NULL. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_sb_mount(const char *dev_name, const struct path *path, const char *type, unsigned long flags, void *data) { return tomoyo_mount_permission(dev_name, path, type, flags, data); } /** * tomoyo_sb_umount - Target for security_sb_umount(). * * @mnt: Pointer to "struct vfsmount". * @flags: Unmount options. * * Returns 0 on success, negative value otherwise. 
 */
static int tomoyo_sb_umount(struct vfsmount *mnt, int flags)
{
	struct path path = { mnt, mnt->mnt_root };
	return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path, NULL);
}

/**
 * tomoyo_sb_pivotroot - Target for security_sb_pivotroot().
 *
 * @old_path: Pointer to "struct path".
 * @new_path: Pointer to "struct path".
 *
 * Returns 0 on success, negative value otherwise.
 */
static int tomoyo_sb_pivotroot(const struct path *old_path,
			       const struct path *new_path)
{
	return tomoyo_path2_perm(TOMOYO_TYPE_PIVOT_ROOT, new_path, old_path);
}

/**
 * tomoyo_socket_listen - Check permission for listen().
 *
 * @sock:    Pointer to "struct socket".
 * @backlog: Backlog parameter.
 *
 * Returns 0 on success, negative value otherwise.
 */
static int tomoyo_socket_listen(struct socket *sock, int backlog)
{
	return tomoyo_socket_listen_permission(sock);
}

/**
 * tomoyo_socket_connect - Check permission for connect().
 *
 * @sock:     Pointer to "struct socket".
 * @addr:     Pointer to "struct sockaddr".
 * @addr_len: Size of @addr.
 *
 * Returns 0 on success, negative value otherwise.
 */
static int tomoyo_socket_connect(struct socket *sock, struct sockaddr *addr,
				 int addr_len)
{
	return tomoyo_socket_connect_permission(sock, addr, addr_len);
}

/**
 * tomoyo_socket_bind - Check permission for bind().
 *
 * @sock:     Pointer to "struct socket".
 * @addr:     Pointer to "struct sockaddr".
 * @addr_len: Size of @addr.
 *
 * Returns 0 on success, negative value otherwise.
 */
static int tomoyo_socket_bind(struct socket *sock, struct sockaddr *addr,
			      int addr_len)
{
	return tomoyo_socket_bind_permission(sock, addr, addr_len);
}

/**
 * tomoyo_socket_sendmsg - Check permission for sendmsg().
 *
 * @sock: Pointer to "struct socket".
 * @msg:  Pointer to "struct msghdr".
 * @size: Size of message.
 *
 * Returns 0 on success, negative value otherwise.
 */
static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
				 int size)
{
	return tomoyo_socket_sendmsg_permission(sock, msg, size);
}

/*
 * tomoyo_hooks is the array of "struct security_hook_list" entries which is
 * used for registering TOMOYO with the LSM framework.
 */
static struct security_hook_list tomoyo_hooks[] = {
	LSM_HOOK_INIT(cred_alloc_blank, tomoyo_cred_alloc_blank),
	LSM_HOOK_INIT(cred_prepare, tomoyo_cred_prepare),
	LSM_HOOK_INIT(cred_transfer, tomoyo_cred_transfer),
	LSM_HOOK_INIT(cred_free, tomoyo_cred_free),
	LSM_HOOK_INIT(bprm_set_creds, tomoyo_bprm_set_creds),
	LSM_HOOK_INIT(bprm_check_security, tomoyo_bprm_check_security),
	LSM_HOOK_INIT(file_fcntl, tomoyo_file_fcntl),
	LSM_HOOK_INIT(file_open, tomoyo_file_open),
	LSM_HOOK_INIT(path_truncate, tomoyo_path_truncate),
	LSM_HOOK_INIT(path_unlink, tomoyo_path_unlink),
	LSM_HOOK_INIT(path_mkdir, tomoyo_path_mkdir),
	LSM_HOOK_INIT(path_rmdir, tomoyo_path_rmdir),
	LSM_HOOK_INIT(path_symlink, tomoyo_path_symlink),
	LSM_HOOK_INIT(path_mknod, tomoyo_path_mknod),
	LSM_HOOK_INIT(path_link, tomoyo_path_link),
	LSM_HOOK_INIT(path_rename, tomoyo_path_rename),
	LSM_HOOK_INIT(inode_getattr, tomoyo_inode_getattr),
	LSM_HOOK_INIT(file_ioctl, tomoyo_file_ioctl),
	LSM_HOOK_INIT(path_chmod, tomoyo_path_chmod),
	LSM_HOOK_INIT(path_chown, tomoyo_path_chown),
	LSM_HOOK_INIT(path_chroot, tomoyo_path_chroot),
	LSM_HOOK_INIT(sb_mount, tomoyo_sb_mount),
	LSM_HOOK_INIT(sb_umount, tomoyo_sb_umount),
	LSM_HOOK_INIT(sb_pivotroot, tomoyo_sb_pivotroot),
	LSM_HOOK_INIT(socket_bind, tomoyo_socket_bind),
	LSM_HOOK_INIT(socket_connect, tomoyo_socket_connect),
	LSM_HOOK_INIT(socket_listen, tomoyo_socket_listen),
	LSM_HOOK_INIT(socket_sendmsg, tomoyo_socket_sendmsg),
};

/* Lock for GC.
*/ DEFINE_SRCU(tomoyo_ss); /** * tomoyo_init - Register TOMOYO Linux as a LSM module. * * Returns 0. */ static int __init tomoyo_init(void) { struct cred *cred = (struct cred *) current_cred(); if (!security_module_enable("tomoyo")) return 0; /* register ourselves with the security framework */ security_add_hooks(tomoyo_hooks, ARRAY_SIZE(tomoyo_hooks), "tomoyo"); printk(KERN_INFO "TOMOYO Linux initialized\n"); cred->security = &tomoyo_kernel_domain; tomoyo_mm_init(); return 0; } security_initcall(tomoyo_init);
null
null
null
null
113,285
6,784
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
6,784
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "net/proxy_resolution/proxy_resolver_v8.h" #include "base/compiler_specific.h" #include "base/files/file_util.h" #include "base/path_service.h" #include "base/strings/string_util.h" #include "base/strings/stringprintf.h" #include "base/strings/utf_string_conversions.h" #include "net/base/completion_callback.h" #include "net/base/net_errors.h" #include "net/proxy_resolution/pac_file_data.h" #include "net/proxy_resolution/proxy_info.h" #include "net/test/gtest_util.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" #include "url/gurl.h" using net::test::IsError; using net::test::IsOk; using ::testing::IsEmpty; namespace net { namespace { // Javascript bindings for ProxyResolverV8, which returns mock values. // Each time one of the bindings is called into, we push the input into a // list, for later verification. class MockJSBindings : public ProxyResolverV8::JSBindings { public: MockJSBindings() : my_ip_address_count(0), my_ip_address_ex_count(0), should_terminate(false) {} void Alert(const base::string16& message) override { VLOG(1) << "PAC-alert: " << message; // Helpful when debugging. alerts.push_back(base::UTF16ToUTF8(message)); } bool ResolveDns(const std::string& host, ResolveDnsOperation op, std::string* output, bool* terminate) override { *terminate = should_terminate; if (op == MY_IP_ADDRESS) { my_ip_address_count++; *output = my_ip_address_result; return !my_ip_address_result.empty(); } if (op == MY_IP_ADDRESS_EX) { my_ip_address_ex_count++; *output = my_ip_address_ex_result; return !my_ip_address_ex_result.empty(); } if (op == DNS_RESOLVE) { dns_resolves.push_back(host); *output = dns_resolve_result; return !dns_resolve_result.empty(); } if (op == DNS_RESOLVE_EX) { dns_resolves_ex.push_back(host); *output = dns_resolve_ex_result; return !dns_resolve_ex_result.empty(); } CHECK(false); return false; } void OnError(int line_number, const base::string16& message) override { // Helpful when debugging. VLOG(1) << "PAC-error: [" << line_number << "] " << message; errors.push_back(base::UTF16ToUTF8(message)); errors_line_number.push_back(line_number); } // Mock values to return. std::string my_ip_address_result; std::string my_ip_address_ex_result; std::string dns_resolve_result; std::string dns_resolve_ex_result; // Inputs we got called with. std::vector<std::string> alerts; std::vector<std::string> errors; std::vector<int> errors_line_number; std::vector<std::string> dns_resolves; std::vector<std::string> dns_resolves_ex; int my_ip_address_count; int my_ip_address_ex_count; // Whether ResolveDns() should terminate script execution. bool should_terminate; }; class ProxyResolverV8Test : public testing::Test { public: // Creates a ProxyResolverV8 using the PAC script contained in |filename|. If // called more than once, the previous ProxyResolverV8 is deleted. int CreateResolver(const char* filename) { base::FilePath path; PathService::Get(base::DIR_SOURCE_ROOT, &path); path = path.AppendASCII("net"); path = path.AppendASCII("data"); path = path.AppendASCII("proxy_resolver_v8_unittest"); path = path.AppendASCII(filename); // Try to read the file from disk. std::string file_contents; bool ok = base::ReadFileToString(path, &file_contents); // If we can't load the file from disk, something is misconfigured. 
if (!ok) {
      LOG(ERROR) << "Failed to read file: " << path.value();
      return ERR_FAILED;
    }

    // Create the ProxyResolver using the PAC script.
    return ProxyResolverV8::Create(PacFileData::FromUTF8(file_contents),
                                   bindings(), &resolver_);
  }

  ProxyResolverV8& resolver() {
    DCHECK(resolver_);
    return *resolver_;
  }

  MockJSBindings* bindings() { return &js_bindings_; }

 private:
  MockJSBindings js_bindings_;
  std::unique_ptr<ProxyResolverV8> resolver_;
};

// Doesn't really matter what these values are for many of the tests.
const GURL kQueryUrl("http://www.google.com");
const GURL kPacUrl;

TEST_F(ProxyResolverV8Test, Direct) {
  ASSERT_THAT(CreateResolver("direct.js"), IsOk());

  ProxyInfo proxy_info;
  int result = resolver().GetProxyForURL(kQueryUrl, &proxy_info, bindings());

  EXPECT_THAT(result, IsOk());
  EXPECT_TRUE(proxy_info.is_direct());

  EXPECT_EQ(0U, bindings()->alerts.size());
  EXPECT_EQ(0U, bindings()->errors.size());
}

TEST_F(ProxyResolverV8Test, ReturnEmptyString) {
  ASSERT_THAT(CreateResolver("return_empty_string.js"), IsOk());

  ProxyInfo proxy_info;
  int result = resolver().GetProxyForURL(kQueryUrl, &proxy_info, bindings());

  EXPECT_THAT(result, IsOk());
  EXPECT_TRUE(proxy_info.is_direct());

  EXPECT_EQ(0U, bindings()->alerts.size());
  EXPECT_EQ(0U, bindings()->errors.size());
}

TEST_F(ProxyResolverV8Test, Basic) {
  ASSERT_THAT(CreateResolver("passthrough.js"), IsOk());

  // The "FindProxyForURL" of this PAC script simply concatenates all of the
  // arguments into a pseudo-host. The purpose of this test is to verify that
  // the correct arguments are being passed to FindProxyForURL().
  {
    ProxyInfo proxy_info;
    int result = resolver().GetProxyForURL(GURL("http://query.com/path"),
                                           &proxy_info, bindings());
    EXPECT_THAT(result, IsOk());
    EXPECT_EQ("http.query.com.path.query.com:80",
              proxy_info.proxy_server().ToURI());
  }
  {
    ProxyInfo proxy_info;
    int result = resolver().GetProxyForURL(GURL("ftp://query.com:90/path"),
                                           &proxy_info, bindings());
    EXPECT_THAT(result, IsOk());
    // Note that FindProxyForURL(url, host) does not expect |host| to contain
    // the port number.
    EXPECT_EQ("ftp.query.com.90.path.query.com:80",
              proxy_info.proxy_server().ToURI());

    EXPECT_EQ(0U, bindings()->alerts.size());
    EXPECT_EQ(0U, bindings()->errors.size());
  }
}

TEST_F(ProxyResolverV8Test, BadReturnType) {
  // These are the filenames of PAC scripts which each return a non-string
  // type for FindProxyForURL(). They should all fail with
  // ERR_PAC_SCRIPT_FAILED.
  static const char* const filenames[] = {
      "return_undefined.js", "return_integer.js", "return_function.js",
      "return_object.js",
      // TODO(eroman): Should 'null' be considered equivalent to "DIRECT" ?
      "return_null.js"};

  for (size_t i = 0; i < arraysize(filenames); ++i) {
    ASSERT_THAT(CreateResolver(filenames[i]), IsOk());

    MockJSBindings bindings;
    ProxyInfo proxy_info;
    int result = resolver().GetProxyForURL(kQueryUrl, &proxy_info, &bindings);

    EXPECT_THAT(result, IsError(ERR_PAC_SCRIPT_FAILED));

    EXPECT_EQ(0U, bindings.alerts.size());
    ASSERT_EQ(1U, bindings.errors.size());
    EXPECT_EQ("FindProxyForURL() did not return a string.",
              bindings.errors[0]);
    EXPECT_EQ(-1, bindings.errors_line_number[0]);
  }
}

// Try using a PAC script which defines no "FindProxyForURL" function.
TEST_F(ProxyResolverV8Test, NoEntryPoint) {
  EXPECT_THAT(CreateResolver("no_entrypoint.js"),
              IsError(ERR_PAC_SCRIPT_FAILED));

  ASSERT_EQ(1U, bindings()->errors.size());
  EXPECT_EQ("FindProxyForURL is undefined or not a function.",
            bindings()->errors[0]);
  EXPECT_EQ(-1, bindings()->errors_line_number[0]);
}

// Try loading a malformed PAC script.
TEST_F(ProxyResolverV8Test, ParseError) { EXPECT_THAT(CreateResolver("missing_close_brace.js"), IsError(ERR_PAC_SCRIPT_FAILED)); EXPECT_EQ(0U, bindings()->alerts.size()); // We get one error during compilation. ASSERT_EQ(1U, bindings()->errors.size()); EXPECT_EQ("Uncaught SyntaxError: Unexpected end of input", bindings()->errors[0]); EXPECT_EQ(5, bindings()->errors_line_number[0]); } // Run a PAC script several times, which has side-effects. TEST_F(ProxyResolverV8Test, SideEffects) { ASSERT_THAT(CreateResolver("side_effects.js"), IsOk()); // The PAC script increments a counter each time we invoke it. for (int i = 0; i < 3; ++i) { ProxyInfo proxy_info; int result = resolver().GetProxyForURL(kQueryUrl, &proxy_info, bindings()); EXPECT_THAT(result, IsOk()); EXPECT_EQ(base::StringPrintf("sideffect_%d:80", i), proxy_info.proxy_server().ToURI()); } // Reload the script -- the javascript environment should be reset, hence // the counter starts over. ASSERT_THAT(CreateResolver("side_effects.js"), IsOk()); for (int i = 0; i < 3; ++i) { ProxyInfo proxy_info; int result = resolver().GetProxyForURL(kQueryUrl, &proxy_info, bindings()); EXPECT_THAT(result, IsOk()); EXPECT_EQ(base::StringPrintf("sideffect_%d:80", i), proxy_info.proxy_server().ToURI()); } } // Execute a PAC script which throws an exception in FindProxyForURL. TEST_F(ProxyResolverV8Test, UnhandledException) { ASSERT_THAT(CreateResolver("unhandled_exception.js"), IsOk()); ProxyInfo proxy_info; int result = resolver().GetProxyForURL(kQueryUrl, &proxy_info, bindings()); EXPECT_THAT(result, IsError(ERR_PAC_SCRIPT_FAILED)); EXPECT_EQ(0U, bindings()->alerts.size()); ASSERT_EQ(1U, bindings()->errors.size()); EXPECT_EQ("Uncaught ReferenceError: undefined_variable is not defined", bindings()->errors[0]); EXPECT_EQ(3, bindings()->errors_line_number[0]); } // Execute a PAC script which throws an exception when first accessing // FindProxyForURL TEST_F(ProxyResolverV8Test, ExceptionAccessingFindProxyForURLDuringInit) { EXPECT_EQ(ERR_PAC_SCRIPT_FAILED, CreateResolver("exception_findproxyforurl_during_init.js")); ASSERT_EQ(2U, bindings()->errors.size()); EXPECT_EQ("Uncaught crash!", bindings()->errors[0]); EXPECT_EQ(9, bindings()->errors_line_number[0]); EXPECT_EQ("Accessing FindProxyForURL threw an exception.", bindings()->errors[1]); EXPECT_EQ(-1, bindings()->errors_line_number[1]); } // Execute a PAC script which throws an exception during the second access to // FindProxyForURL TEST_F(ProxyResolverV8Test, ExceptionAccessingFindProxyForURLDuringResolve) { ASSERT_THAT(CreateResolver("exception_findproxyforurl_during_resolve.js"), IsOk()); ProxyInfo proxy_info; int result = resolver().GetProxyForURL(kQueryUrl, &proxy_info, bindings()); EXPECT_THAT(result, IsError(ERR_PAC_SCRIPT_FAILED)); ASSERT_EQ(2U, bindings()->errors.size()); EXPECT_EQ("Uncaught crash!", bindings()->errors[0]); EXPECT_EQ(17, bindings()->errors_line_number[0]); EXPECT_EQ("Accessing FindProxyForURL threw an exception.", bindings()->errors[1]); EXPECT_EQ(-1, bindings()->errors_line_number[1]); } TEST_F(ProxyResolverV8Test, ReturnUnicode) { ASSERT_THAT(CreateResolver("return_unicode.js"), IsOk()); ProxyInfo proxy_info; int result = resolver().GetProxyForURL(kQueryUrl, &proxy_info, bindings()); // The result from this resolve was unparseable, because it // wasn't ASCII. EXPECT_THAT(result, IsError(ERR_PAC_SCRIPT_FAILED)); } // Test the PAC library functions that we expose in the JS environment. 
TEST_F(ProxyResolverV8Test, JavascriptLibrary) { ASSERT_THAT(CreateResolver("pac_library_unittest.js"), IsOk()); ProxyInfo proxy_info; int result = resolver().GetProxyForURL(kQueryUrl, &proxy_info, bindings()); // If the javascript side of this unit-test fails, it will throw a javascript // exception. Otherwise it will return "PROXY success:80". EXPECT_THAT(bindings()->alerts, IsEmpty()); EXPECT_THAT(bindings()->errors, IsEmpty()); ASSERT_THAT(result, IsOk()); EXPECT_EQ("success:80", proxy_info.proxy_server().ToURI()); } // Test marshalling/un-marshalling of values between C++/V8. TEST_F(ProxyResolverV8Test, V8Bindings) { ASSERT_THAT(CreateResolver("bindings.js"), IsOk()); bindings()->dns_resolve_result = "127.0.0.1"; ProxyInfo proxy_info; int result = resolver().GetProxyForURL(kQueryUrl, &proxy_info, bindings()); EXPECT_THAT(result, IsOk()); EXPECT_TRUE(proxy_info.is_direct()); EXPECT_EQ(0U, bindings()->errors.size()); // Alert was called 5 times. ASSERT_EQ(5U, bindings()->alerts.size()); EXPECT_EQ("undefined", bindings()->alerts[0]); EXPECT_EQ("null", bindings()->alerts[1]); EXPECT_EQ("undefined", bindings()->alerts[2]); EXPECT_EQ("[object Object]", bindings()->alerts[3]); EXPECT_EQ("exception from calling toString()", bindings()->alerts[4]); // DnsResolve was called 8 times, however only 2 of those were string // parameters. (so 6 of them failed immediately). ASSERT_EQ(2U, bindings()->dns_resolves.size()); EXPECT_EQ("", bindings()->dns_resolves[0]); EXPECT_EQ("arg1", bindings()->dns_resolves[1]); // MyIpAddress was called two times. EXPECT_EQ(2, bindings()->my_ip_address_count); // MyIpAddressEx was called once. EXPECT_EQ(1, bindings()->my_ip_address_ex_count); // DnsResolveEx was called 2 times. ASSERT_EQ(2U, bindings()->dns_resolves_ex.size()); EXPECT_EQ("is_resolvable", bindings()->dns_resolves_ex[0]); EXPECT_EQ("foobar", bindings()->dns_resolves_ex[1]); } // Test calling a binding (myIpAddress()) from the script's global scope. // http://crbug.com/40026 TEST_F(ProxyResolverV8Test, BindingCalledDuringInitialization) { ASSERT_THAT(CreateResolver("binding_from_global.js"), IsOk()); // myIpAddress() got called during initialization of the script. EXPECT_EQ(1, bindings()->my_ip_address_count); ProxyInfo proxy_info; int result = resolver().GetProxyForURL(kQueryUrl, &proxy_info, bindings()); EXPECT_THAT(result, IsOk()); EXPECT_FALSE(proxy_info.is_direct()); EXPECT_EQ("127.0.0.1:80", proxy_info.proxy_server().ToURI()); // Check that no other bindings were called. EXPECT_EQ(0U, bindings()->errors.size()); ASSERT_EQ(0U, bindings()->alerts.size()); ASSERT_EQ(0U, bindings()->dns_resolves.size()); EXPECT_EQ(0, bindings()->my_ip_address_ex_count); ASSERT_EQ(0U, bindings()->dns_resolves_ex.size()); } // Try loading a PAC script that ends with a comment and has no terminal // newline. This should not cause problems with the PAC utility functions // that we add to the script's environment. // http://crbug.com/22864 TEST_F(ProxyResolverV8Test, EndsWithCommentNoNewline) { ASSERT_THAT(CreateResolver("ends_with_comment.js"), IsOk()); ProxyInfo proxy_info; int result = resolver().GetProxyForURL(kQueryUrl, &proxy_info, bindings()); EXPECT_THAT(result, IsOk()); EXPECT_FALSE(proxy_info.is_direct()); EXPECT_EQ("success:80", proxy_info.proxy_server().ToURI()); } // Try loading a PAC script that ends with a statement and has no terminal // newline. This should not cause problems with the PAC utility functions // that we add to the script's environment. 
// http://crbug.com/22864
TEST_F(ProxyResolverV8Test, EndsWithStatementNoNewline) {
  ASSERT_THAT(CreateResolver("ends_with_statement_no_semicolon.js"), IsOk());

  ProxyInfo proxy_info;
  int result = resolver().GetProxyForURL(kQueryUrl, &proxy_info, bindings());

  EXPECT_THAT(result, IsOk());
  EXPECT_FALSE(proxy_info.is_direct());
  EXPECT_EQ("success:3", proxy_info.proxy_server().ToURI());
}

// Test the return values from myIpAddress(), myIpAddressEx(), dnsResolve(),
// dnsResolveEx(), isResolvable(), isResolvableEx(), when the binding
// returns an empty string (failure). This simulates the return values from
// those functions when the underlying DNS resolution fails.
TEST_F(ProxyResolverV8Test, DNSResolutionFailure) {
  ASSERT_THAT(CreateResolver("dns_fail.js"), IsOk());

  ProxyInfo proxy_info;
  int result = resolver().GetProxyForURL(kQueryUrl, &proxy_info, bindings());

  EXPECT_THAT(result, IsOk());
  EXPECT_FALSE(proxy_info.is_direct());
  EXPECT_EQ("success:80", proxy_info.proxy_server().ToURI());
}

TEST_F(ProxyResolverV8Test, DNSResolutionOfInternationDomainName) {
  ASSERT_THAT(CreateResolver("international_domain_names.js"), IsOk());

  // Execute FindProxyForURL().
  ProxyInfo proxy_info;
  int result = resolver().GetProxyForURL(kQueryUrl, &proxy_info, bindings());

  EXPECT_THAT(result, IsOk());
  EXPECT_TRUE(proxy_info.is_direct());

  // Check that the international domain name was converted to punycode
  // before passing it onto the bindings layer.
  ASSERT_EQ(1u, bindings()->dns_resolves.size());
  EXPECT_EQ("xn--bcher-kva.ch", bindings()->dns_resolves[0]);

  ASSERT_EQ(1u, bindings()->dns_resolves_ex.size());
  EXPECT_EQ("xn--bcher-kva.ch", bindings()->dns_resolves_ex[0]);
}

// Test that when resolving a URL which contains an IPv6 string literal, the
// brackets are removed from the host before passing it down to the PAC script.
// If we don't do this, then subsequent calls to dnsResolveEx(host) will be
// doomed to fail since it won't correspond with a valid name.
TEST_F(ProxyResolverV8Test, IPv6HostnamesNotBracketed) {
  ASSERT_THAT(CreateResolver("resolve_host.js"), IsOk());

  ProxyInfo proxy_info;
  int result = resolver().GetProxyForURL(
      GURL("http://[abcd::efff]:99/watsupdawg"), &proxy_info, bindings());

  EXPECT_THAT(result, IsOk());
  EXPECT_TRUE(proxy_info.is_direct());

  // We called dnsResolveEx() exactly once, by passing through the "host"
  // argument to FindProxyForURL(). The brackets should have been stripped.
  ASSERT_EQ(1U, bindings()->dns_resolves_ex.size());
  EXPECT_EQ("abcd::efff", bindings()->dns_resolves_ex[0]);
}

// Test that terminating a script within DnsResolve() leads to eventual
// termination of the script. Also test that repeatedly calling terminate is
// safe, and running the script again after termination still works.
TEST_F(ProxyResolverV8Test, Terminate) {
  ASSERT_THAT(CreateResolver("terminate.js"), IsOk());

  // Terminate script execution upon reaching dnsResolve(). Note that
  // termination may not take effect right away (so the subsequent dnsResolve()
  // and alert() may be run).
  bindings()->should_terminate = true;

  ProxyInfo proxy_info;
  int result =
      resolver().GetProxyForURL(GURL("http://hang/"), &proxy_info, bindings());

  // The script execution was terminated.
  EXPECT_THAT(result, IsError(ERR_PAC_SCRIPT_FAILED));

  EXPECT_EQ(1U, bindings()->dns_resolves.size());
  EXPECT_GE(2U, bindings()->dns_resolves_ex.size());
  EXPECT_GE(1U, bindings()->alerts.size());

  EXPECT_EQ(1U, bindings()->errors.size());

  // Termination shows up as an uncaught exception without any message.
EXPECT_EQ("", bindings()->errors[0]); bindings()->errors.clear(); // Try running the script again, this time with a different input which won't // cause a termination+hang. result = resolver().GetProxyForURL(GURL("http://kittens/"), &proxy_info, bindings()); EXPECT_THAT(result, IsOk()); EXPECT_EQ(0u, bindings()->errors.size()); EXPECT_EQ("kittens:88", proxy_info.proxy_server().ToURI()); } } // namespace } // namespace net
null
null
null
null
3,647
25,361
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
25,361
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "extensions/browser/extension_api_frame_id_map.h" #include <tuple> #include <utility> #include "base/logging.h" #include "base/metrics/histogram_macros.h" #include "content/public/browser/browser_thread.h" #include "content/public/browser/navigation_handle.h" #include "content/public/browser/render_frame_host.h" #include "content/public/browser/render_process_host.h" #include "content/public/browser/web_contents.h" #include "content/public/common/child_process_host.h" #include "extensions/browser/extensions_browser_client.h" #include "extensions/common/constants.h" namespace extensions { namespace { // The map is accessed on the IO and UI thread, so construct it once and never // delete it. base::LazyInstance<ExtensionApiFrameIdMap>::Leaky g_map_instance = LAZY_INSTANCE_INITIALIZER; bool IsFrameRoutingIdValid(int frame_routing_id) { // frame_routing_id == -2 = MSG_ROUTING_NONE -> not a RenderFrameHost. // frame_routing_id == -1 -> should be MSG_ROUTING_NONE, but there are // callers that use "-1" for unknown frames. return frame_routing_id > -1; } } // namespace const int ExtensionApiFrameIdMap::kInvalidFrameId = -1; const int ExtensionApiFrameIdMap::kTopFrameId = 0; ExtensionApiFrameIdMap::FrameData::FrameData() : frame_id(kInvalidFrameId), parent_frame_id(kInvalidFrameId), tab_id(extension_misc::kUnknownTabId), window_id(extension_misc::kUnknownWindowId) {} ExtensionApiFrameIdMap::FrameData::FrameData(int frame_id, int parent_frame_id, int tab_id, int window_id, GURL last_committed_main_frame_url) : frame_id(frame_id), parent_frame_id(parent_frame_id), tab_id(tab_id), window_id(window_id), last_committed_main_frame_url(std::move(last_committed_main_frame_url)) {} ExtensionApiFrameIdMap::FrameData::~FrameData() = default; ExtensionApiFrameIdMap::FrameData::FrameData( const ExtensionApiFrameIdMap::FrameData& other) = default; ExtensionApiFrameIdMap::FrameData& ExtensionApiFrameIdMap::FrameData::operator=( const ExtensionApiFrameIdMap::FrameData& other) = default; ExtensionApiFrameIdMap::RenderFrameIdKey::RenderFrameIdKey() : render_process_id(content::ChildProcessHost::kInvalidUniqueID), frame_routing_id(MSG_ROUTING_NONE) {} ExtensionApiFrameIdMap::RenderFrameIdKey::RenderFrameIdKey( int render_process_id, int frame_routing_id) : render_process_id(render_process_id), frame_routing_id(frame_routing_id) {} ExtensionApiFrameIdMap::FrameDataCallbacks::FrameDataCallbacks() : is_iterating(false) {} ExtensionApiFrameIdMap::FrameDataCallbacks::FrameDataCallbacks( const FrameDataCallbacks& other) = default; ExtensionApiFrameIdMap::FrameDataCallbacks::~FrameDataCallbacks() {} bool ExtensionApiFrameIdMap::RenderFrameIdKey::operator<( const RenderFrameIdKey& other) const { return std::tie(render_process_id, frame_routing_id) < std::tie(other.render_process_id, other.frame_routing_id); } bool ExtensionApiFrameIdMap::RenderFrameIdKey::operator==( const RenderFrameIdKey& other) const { return render_process_id == other.render_process_id && frame_routing_id == other.frame_routing_id; } ExtensionApiFrameIdMap::ExtensionApiFrameIdMap() { // The browser client can be null in unittests. 
if (ExtensionsBrowserClient::Get()) { helper_ = ExtensionsBrowserClient::Get()->CreateExtensionApiFrameIdMapHelper( this); } } ExtensionApiFrameIdMap::~ExtensionApiFrameIdMap() {} // static ExtensionApiFrameIdMap* ExtensionApiFrameIdMap::Get() { return g_map_instance.Pointer(); } // static int ExtensionApiFrameIdMap::GetFrameId(content::RenderFrameHost* rfh) { if (!rfh) return kInvalidFrameId; if (rfh->GetParent()) return rfh->GetFrameTreeNodeId(); return kTopFrameId; } // static int ExtensionApiFrameIdMap::GetFrameId( content::NavigationHandle* navigation_handle) { return navigation_handle->IsInMainFrame() ? kTopFrameId : navigation_handle->GetFrameTreeNodeId(); } // static int ExtensionApiFrameIdMap::GetParentFrameId(content::RenderFrameHost* rfh) { return rfh ? GetFrameId(rfh->GetParent()) : kInvalidFrameId; } // static int ExtensionApiFrameIdMap::GetParentFrameId( content::NavigationHandle* navigation_handle) { if (navigation_handle->IsInMainFrame()) return kInvalidFrameId; if (navigation_handle->IsParentMainFrame()) return kTopFrameId; return navigation_handle->GetParentFrame()->GetFrameTreeNodeId(); } // static content::RenderFrameHost* ExtensionApiFrameIdMap::GetRenderFrameHostById( content::WebContents* web_contents, int frame_id) { // Although it is technically possible to map |frame_id| to a RenderFrameHost // without WebContents, we choose to not do that because in the extension API // frameIds are only guaranteed to be meaningful in combination with a tabId. if (!web_contents) return nullptr; if (frame_id == kInvalidFrameId) return nullptr; if (frame_id == kTopFrameId) return web_contents->GetMainFrame(); DCHECK_GE(frame_id, 1); // Unfortunately, extension APIs do not know which process to expect for a // given frame ID, so we must use an unsafe API here that could return a // different RenderFrameHost than the caller may have expected (e.g., one that // changed after a cross-process navigation). return web_contents->UnsafeFindFrameByFrameTreeNodeId(frame_id); } ExtensionApiFrameIdMap::FrameData ExtensionApiFrameIdMap::KeyToValue( const RenderFrameIdKey& key) const { content::RenderFrameHost* rfh = content::RenderFrameHost::FromID( key.render_process_id, key.frame_routing_id); if (!rfh || !rfh->IsRenderFrameLive()) return FrameData(); content::WebContents* web_contents = content::WebContents::FromRenderFrameHost(rfh); // The RenderFrameHost may not have an associated WebContents in cases // such as interstitial pages. GURL last_committed_main_frame_url = web_contents ? web_contents->GetLastCommittedURL() : GURL(); int tab_id = extension_misc::kUnknownTabId; int window_id = extension_misc::kUnknownWindowId; if (helper_) helper_->PopulateTabData(rfh, &tab_id, &window_id); return FrameData(GetFrameId(rfh), GetParentFrameId(rfh), tab_id, window_id, std::move(last_committed_main_frame_url)); } ExtensionApiFrameIdMap::FrameData ExtensionApiFrameIdMap::LookupFrameDataOnUI( const RenderFrameIdKey& key, bool is_from_io) { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); bool lookup_successful = false; FrameData data; FrameDataMap::const_iterator frame_id_iter = frame_data_map_.find(key); if (frame_id_iter != frame_data_map_.end()) { lookup_successful = true; data = frame_id_iter->second; } else { data = KeyToValue(key); // Don't save invalid values in the map. 
if (data.frame_id != kInvalidFrameId) { lookup_successful = true; auto kvpair = FrameDataMap::value_type(key, data); base::AutoLock lock(frame_data_map_lock_); frame_data_map_.insert(kvpair); } } // TODO(devlin): Depending on how the data looks, this may be removable after // a few cycles. Check back in M52 to see if it's still needed. if (is_from_io) { UMA_HISTOGRAM_BOOLEAN("Extensions.ExtensionFrameMapLookupSuccessful", lookup_successful); } return data; } void ExtensionApiFrameIdMap::ReceivedFrameDataOnIO( const RenderFrameIdKey& key, const FrameData& cached_frame_data) { DCHECK_CURRENTLY_ON(content::BrowserThread::IO); FrameDataCallbacksMap::iterator map_iter = callbacks_map_.find(key); if (map_iter == callbacks_map_.end()) { // Can happen if ReceivedFrameDataOnIO was called after the frame ID was // resolved (e.g. via GetFrameDataOnIO), but before PostTaskAndReply // replied. return; } FrameDataCallbacks& callbacks = map_iter->second; if (callbacks.is_iterating) return; callbacks.is_iterating = true; // Note: Extra items can be appended to |callbacks| during this loop if a // callback calls GetFrameDataOnIO(). for (std::list<FrameDataCallback>::iterator it = callbacks.callbacks.begin(); it != callbacks.callbacks.end(); ++it) { it->Run(cached_frame_data); } callbacks_map_.erase(key); } void ExtensionApiFrameIdMap::GetFrameDataOnIO( int render_process_id, int frame_routing_id, const FrameDataCallback& callback) { DCHECK_CURRENTLY_ON(content::BrowserThread::IO); // TODO(robwu): Enable assertion when all callers have been fixed. // DCHECK_EQ(MSG_ROUTING_NONE, -1); if (!IsFrameRoutingIdValid(frame_routing_id)) { callback.Run(FrameData()); return; } FrameData cached_frame_data; bool did_find_cached_frame_data = GetCachedFrameDataOnIO( render_process_id, frame_routing_id, &cached_frame_data); const RenderFrameIdKey key(render_process_id, frame_routing_id); FrameDataCallbacksMap::iterator map_iter = callbacks_map_.find(key); if (did_find_cached_frame_data) { // Value already cached, thread hopping is not needed. if (map_iter == callbacks_map_.end()) { // If the frame ID was cached, then it is likely that there are no pending // callbacks. So do not unnecessarily copy the callback, but run it. callback.Run(cached_frame_data); } else { map_iter->second.callbacks.push_back(callback); ReceivedFrameDataOnIO(key, cached_frame_data); } return; } // The key was seen for the first time (or the frame has been removed). // Hop to the UI thread to look up the extension API frame ID. callbacks_map_[key].callbacks.push_back(callback); content::BrowserThread::PostTaskAndReplyWithResult( content::BrowserThread::UI, FROM_HERE, base::Bind(&ExtensionApiFrameIdMap::LookupFrameDataOnUI, base::Unretained(this), key, true /* is_from_io */), base::Bind(&ExtensionApiFrameIdMap::ReceivedFrameDataOnIO, base::Unretained(this), key)); } bool ExtensionApiFrameIdMap::GetCachedFrameDataOnIO(int render_process_id, int frame_routing_id, FrameData* frame_data_out) { DCHECK_CURRENTLY_ON(content::BrowserThread::IO); // TODO(robwu): Enable assertion when all callers have been fixed. // DCHECK_EQ(MSG_ROUTING_NONE, -1); if (!IsFrameRoutingIdValid(frame_routing_id)) return false; // A valid routing ID is only meaningful with a valid process ID. 
DCHECK_GE(render_process_id, 0); bool found = false; { base::AutoLock lock(frame_data_map_lock_); FrameDataMap::const_iterator frame_id_iter = frame_data_map_.find( RenderFrameIdKey(render_process_id, frame_routing_id)); if (frame_id_iter != frame_data_map_.end()) { // This is very likely to happen because CacheFrameData() is called as // soon as the frame is created. *frame_data_out = frame_id_iter->second; found = true; } } // TODO(devlin): Depending on how the data looks, this may be removable after // a few cycles. Check back in M52 to see if it's still needed. UMA_HISTOGRAM_BOOLEAN("Extensions.ExtensionFrameMapCacheHit", found); return found; } ExtensionApiFrameIdMap::FrameData ExtensionApiFrameIdMap::GetFrameData( content::RenderFrameHost* rfh) { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); if (!rfh) return FrameData(); const RenderFrameIdKey key(rfh->GetProcess()->GetID(), rfh->GetRoutingID()); return LookupFrameDataOnUI(key, false /* is_from_io */); } void ExtensionApiFrameIdMap::InitializeRenderFrameData( content::RenderFrameHost* rfh) { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); DCHECK(rfh); DCHECK(rfh->IsRenderFrameLive()); const RenderFrameIdKey key(rfh->GetProcess()->GetID(), rfh->GetRoutingID()); CacheFrameData(key); DCHECK(frame_data_map_.find(key) != frame_data_map_.end()); } void ExtensionApiFrameIdMap::CacheFrameData(const RenderFrameIdKey& key) { LookupFrameDataOnUI(key, false /* is_from_io */); } void ExtensionApiFrameIdMap::OnRenderFrameDeleted( content::RenderFrameHost* rfh) { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); DCHECK(rfh); const RenderFrameIdKey key(rfh->GetProcess()->GetID(), rfh->GetRoutingID()); RemoveFrameData(key); } void ExtensionApiFrameIdMap::UpdateTabAndWindowId( int tab_id, int window_id, content::RenderFrameHost* rfh) { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); DCHECK(rfh); const RenderFrameIdKey key(rfh->GetProcess()->GetID(), rfh->GetRoutingID()); // Only track FrameData for live render frames. if (!rfh->IsRenderFrameLive()) { return; } base::AutoLock lock(frame_data_map_lock_); FrameDataMap::iterator iter = frame_data_map_.find(key); // The FrameData for |rfh| should have already been initialized. DCHECK(iter != frame_data_map_.end()); iter->second.tab_id = tab_id; iter->second.window_id = window_id; } void ExtensionApiFrameIdMap::OnMainFrameReadyToCommitNavigation( content::NavigationHandle* navigation_handle) { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); DCHECK(navigation_handle->IsInMainFrame()); bool did_insert = false; std::tie(std::ignore, did_insert) = ready_to_commit_document_navigations_.insert(navigation_handle); DCHECK(did_insert); content::RenderFrameHost* main_frame = navigation_handle->GetRenderFrameHost(); DCHECK(main_frame); // We only track live frames. if (!main_frame->IsRenderFrameLive()) return; const RenderFrameIdKey key(main_frame->GetProcess()->GetID(), main_frame->GetRoutingID()); base::AutoLock lock(frame_data_map_lock_); FrameDataMap::iterator iter = frame_data_map_.find(key); // We must have already cached the FrameData for this in // InitializeRenderFrameHost. 
  DCHECK(iter != frame_data_map_.end());
  iter->second.pending_main_frame_url = navigation_handle->GetURL();
}

void ExtensionApiFrameIdMap::OnMainFrameDidFinishNavigation(
    content::NavigationHandle* navigation_handle) {
  DCHECK_CURRENTLY_ON(content::BrowserThread::UI);
  DCHECK(navigation_handle->IsInMainFrame());

  bool did_fire_ready_to_commit_navigation =
      !!ready_to_commit_document_navigations_.erase(navigation_handle);

  // It's safe to call NavigationHandle::GetRenderFrameHost here iff the
  // navigation committed or a ReadyToCommitNavigation event was dispatched for
  // this navigation.
  // Note a RenderFrameHost might not be associated with the NavigationHandle
  // in WebContentsObserver::DidFinishNavigation. This might happen when the
  // navigation doesn't commit, which can occur for a variety of reasons: e.g.
  // the network request to fetch the navigation url failed, or the navigation
  // was cancelled by, say, a NavigationThrottle.
  // There's nothing to do if the RenderFrameHost can't be fetched for this
  // navigation.
  bool can_fetch_render_frame_host =
      navigation_handle->HasCommitted() || did_fire_ready_to_commit_navigation;
  if (!can_fetch_render_frame_host)
    return;

  content::RenderFrameHost* main_frame =
      navigation_handle->GetRenderFrameHost();
  DCHECK(main_frame);

  // We only track live frames.
  if (!main_frame->IsRenderFrameLive())
    return;

  const RenderFrameIdKey key(main_frame->GetProcess()->GetID(),
                             main_frame->GetRoutingID());

  base::AutoLock lock(frame_data_map_lock_);
  FrameDataMap::iterator iter = frame_data_map_.find(key);

  // We must have already cached the FrameData for this in
  // InitializeRenderFrameData.
  DCHECK(iter != frame_data_map_.end());

  iter->second.last_committed_main_frame_url =
      main_frame->GetLastCommittedURL();
  iter->second.pending_main_frame_url = base::nullopt;
}

bool ExtensionApiFrameIdMap::HasCachedFrameDataForTesting(
    content::RenderFrameHost* rfh) const {
  DCHECK_CURRENTLY_ON(content::BrowserThread::UI);
  if (!rfh)
    return false;

  const RenderFrameIdKey key(rfh->GetProcess()->GetID(), rfh->GetRoutingID());
  return frame_data_map_.find(key) != frame_data_map_.end();
}

size_t ExtensionApiFrameIdMap::GetFrameDataCountForTesting() const {
  DCHECK_CURRENTLY_ON(content::BrowserThread::UI);
  return frame_data_map_.size();
}

void ExtensionApiFrameIdMap::RemoveFrameData(const RenderFrameIdKey& key) {
  DCHECK_CURRENTLY_ON(content::BrowserThread::UI);
  base::AutoLock lock(frame_data_map_lock_);
  frame_data_map_.erase(key);
}

}  // namespace extensions
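// A minimal standalone C++ sketch of the frame-ID convention that
// GetFrameId() and GetParentFrameId() above implement: the main frame is
// always 0 (kTopFrameId), a subframe exposes its frame tree node id, and a
// missing frame maps to -1 (kInvalidFrameId). The Frame struct is a
// hypothetical stand-in for content::RenderFrameHost, illustration only.
#include <cassert>

struct Frame {
  Frame* parent;           // nullptr for a main frame
  int frame_tree_node_id;  // stable across cross-process navigations
};

constexpr int kInvalidFrameId = -1;
constexpr int kTopFrameId = 0;

int GetExtensionFrameId(const Frame* f) {
  if (!f)
    return kInvalidFrameId;
  return f->parent ? f->frame_tree_node_id : kTopFrameId;
}

int GetExtensionParentFrameId(const Frame* f) {
  return f ? GetExtensionFrameId(f->parent) : kInvalidFrameId;
}

int main() {
  Frame top{nullptr, 7};
  Frame child{&top, 12};
  assert(GetExtensionFrameId(&top) == kTopFrameId);
  assert(GetExtensionFrameId(&child) == 12);
  assert(GetExtensionParentFrameId(&child) == kTopFrameId);
  assert(GetExtensionParentFrameId(&top) == kInvalidFrameId);
  return 0;
}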
null
null
null
null
22,224
977
28
train_val
ce683e5f9d045e5d67d1312a42b359cb2ab2a13c
165,972
linux
1
https://github.com/torvalds/linux
2016-04-14 00:30:37+02:00
static inline int check_entry_size_and_hooks(struct arpt_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; int err; if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 || (unsigned char *)e + sizeof(struct arpt_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } if (!arp_checkentry(&e->arp)) return -EINVAL; err = xt_check_entry_offsets(e, e->target_offset, e->next_offset); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_debug("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; }
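// The limit comparisons at the top of check_entry_size_and_hooks() are the
// load-bearing part: without them a crafted next_offset lets the table
// walker index past the user-supplied blob. A minimal standalone C++ sketch
// of the same validation pattern follows; Record is a hypothetical layout,
// not struct arpt_entry, and only the shape and order of the checks
// (alignment, header fits, self-declared size stays in bounds) mirrors the
// function above.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

struct Record {
  uint16_t target_offset;  // where the embedded target starts
  uint16_t next_offset;    // total self-declared size of this record
};

// Returns true iff a Record starting at offset `pos` lies fully inside the
// `len`-byte blob: aligned, header fits, claimed body stays in bounds.
bool RecordInBounds(const uint8_t* blob, size_t len, size_t pos) {
  if (pos > len || pos % alignof(Record) != 0)
    return false;  // past the end, or misaligned
  if (len - pos < sizeof(Record))
    return false;  // the fixed header alone would overrun
  Record r;
  std::memcpy(&r, blob + pos, sizeof(r));
  if (r.next_offset < sizeof(Record))
    return false;  // record claims to be smaller than its own header
  return r.next_offset <= len - pos;  // claimed body stays inside the blob
}

int main() {
  std::vector<uint8_t> blob(32, 0);

  Record ok{sizeof(Record), 16};
  std::memcpy(blob.data(), &ok, sizeof(ok));
  assert(RecordInBounds(blob.data(), blob.size(), 0));

  Record evil{sizeof(Record), 0xFFFF};  // claims to run far past the blob
  std::memcpy(blob.data(), &evil, sizeof(evil));
  assert(!RecordInBounds(blob.data(), blob.size(), 0));
  return 0;
}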
CVE-2016-4997
CWE-264
https://github.com/torvalds/linux/commit/ce683e5f9d045e5d67d1312a42b359cb2ab2a13c
Low
3,866
32,821
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
32,821
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/* * Copyright (C) 2007 Eric Seidel <eric@webkit.org> * Copyright (C) 2008 Apple Inc. All rights reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public License * along with this library; see the file COPYING.LIB. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. */ #ifndef THIRD_PARTY_BLINK_RENDERER_CORE_SVG_SVG_ANIMATE_MOTION_ELEMENT_H_ #define THIRD_PARTY_BLINK_RENDERER_CORE_SVG_SVG_ANIMATE_MOTION_ELEMENT_H_ #include "third_party/blink/renderer/core/svg/svg_animation_element.h" #include "third_party/blink/renderer/platform/graphics/path.h" namespace blink { class SVGAnimateMotionElement final : public SVGAnimationElement { DEFINE_WRAPPERTYPEINFO(); public: ~SVGAnimateMotionElement() override; DECLARE_NODE_FACTORY(SVGAnimateMotionElement); void UpdateAnimationPath(); private: explicit SVGAnimateMotionElement(Document&); bool HasValidTarget() override; void ParseAttribute(const AttributeModificationParams&) override; void ResetAnimatedType() override; void ClearAnimatedType() override; bool CalculateToAtEndOfDurationValue( const String& to_at_end_of_duration_string) override; bool CalculateFromAndToValues(const String& from_string, const String& to_string) override; bool CalculateFromAndByValues(const String& from_string, const String& by_string) override; void CalculateAnimatedValue(float percentage, unsigned repeat_count, SVGSMILElement* result_element) override; void ApplyResultsToTarget() override; float CalculateDistance(const String& from_string, const String& to_string) override; enum RotateMode { kRotateAngle, kRotateAuto, kRotateAutoReverse }; RotateMode GetRotateMode() const; bool has_to_point_at_end_of_duration_; void UpdateAnimationMode() override; void InvalidateForAnimateMotionTransformChange(LayoutObject& target); // Note: we do not support percentage values for to/from coords as the spec // implies we should (opera doesn't either) FloatPoint from_point_; FloatPoint to_point_; FloatPoint to_point_at_end_of_duration_; Path path_; Path animation_path_; }; } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_CORE_SVG_SVG_ANIMATE_MOTION_ELEMENT_H_
null
null
null
null
29,684
44,709
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
44,709
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef PPAPI_PROXY_GAMEPAD_RESOURCE_H_ #define PPAPI_PROXY_GAMEPAD_RESOURCE_H_ #include <memory> #include "base/compiler_specific.h" #include "base/macros.h" #include "base/memory/shared_memory.h" #include "ppapi/c/ppb_gamepad.h" #include "ppapi/proxy/plugin_resource.h" #include "ppapi/proxy/ppapi_proxy_export.h" #include "ppapi/shared_impl/ppb_gamepad_shared.h" #include "ppapi/thunk/ppb_gamepad_api.h" struct PP_GamepadsSampleData; namespace base { class SharedMemory; } namespace ppapi { namespace proxy { // This class is a bit weird. It isn't a true resource from the plugin's // perspective. But we need to make requests to the browser and get replies. // It's more convenient to do this as a resource, so the instance just // maintains an internal lazily instantiated copy of this resource. class PPAPI_PROXY_EXPORT GamepadResource : public PluginResource, public thunk::PPB_Gamepad_API { public: GamepadResource(Connection connection, PP_Instance instance); ~GamepadResource() override; // Resource implementation. thunk::PPB_Gamepad_API* AsPPB_Gamepad_API() override; // PPB_Gamepad_API. void Sample(PP_Instance instance, PP_GamepadsSampleData* data) override; private: void OnPluginMsgSendMemory(const ResourceMessageReplyParams& params); std::unique_ptr<base::SharedMemory> shared_memory_; const ContentGamepadHardwareBuffer* buffer_; // Last data returned so we can use this in the event of a read failure. PP_GamepadsSampleData last_read_; DISALLOW_COPY_AND_ASSIGN(GamepadResource); }; } // namespace proxy } // namespace ppapi #endif // PPAPI_PROXY_GAMEPAD_RESOURCE_H_
null
null
null
null
41,572
38,777
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
38,777
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_AUDIO_OUTPUT_DEVICES_SET_SINK_ID_CALLBACKS_H_ #define THIRD_PARTY_BLINK_RENDERER_MODULES_AUDIO_OUTPUT_DEVICES_SET_SINK_ID_CALLBACKS_H_ #include "base/memory/scoped_refptr.h" #include "third_party/blink/public/platform/web_set_sink_id_callbacks.h" #include "third_party/blink/renderer/platform/heap/handle.h" #include "third_party/blink/renderer/platform/timer.h" #include "third_party/blink/renderer/platform/wtf/noncopyable.h" #include "third_party/blink/renderer/platform/wtf/text/wtf_string.h" namespace blink { class HTMLMediaElement; class ScriptPromiseResolver; class SetSinkIdCallbacks final : public WebSetSinkIdCallbacks { // FIXME(tasak): When making public/platform classes to use PartitionAlloc, // the following macro should be moved to WebCallbacks defined in // public/platform/WebCallbacks.h. USING_FAST_MALLOC(SetSinkIdCallbacks); WTF_MAKE_NONCOPYABLE(SetSinkIdCallbacks); public: SetSinkIdCallbacks(ScriptPromiseResolver*, HTMLMediaElement&, const String& sink_id); ~SetSinkIdCallbacks() override; void OnSuccess() override; void OnError(WebSetSinkIdError) override; private: Persistent<ScriptPromiseResolver> resolver_; Persistent<HTMLMediaElement> element_; String sink_id_; }; } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_MODULES_AUDIO_OUTPUT_DEVICES_SET_SINK_ID_CALLBACKS_H_
null
null
null
null
35,640
19,543
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
184,538
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Driver for CC770 and AN82527 CAN controllers on the legacy ISA bus * * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the version 2 of the GNU General Public License * as published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Bosch CC770 and Intel AN82527 CAN controllers on the ISA or PC-104 bus. * The I/O port or memory address and the IRQ number must be specified via * module parameters: * * insmod cc770_isa.ko port=0x310,0x380 irq=7,11 * * for ISA devices using I/O ports or: * * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 * * for memory mapped ISA devices. * * Indirect access via address and data port is supported as well: * * insmod cc770_isa.ko port=0x310,0x380 indirect=1 irq=7,11 * * Furthermore, the following mode parameter can be defined: * * clk: External oscillator clock frequency (default=16000000 [16 MHz]) * cir: CPU interface register (default=0x40 [DSC]) * bcr: Bus configuration register (default=0x40 [CBY]) * cor: Clockout register (default=0x00) * * Note: for clk, cir, bcr and cor, the first argument re-defines the * default for all other devices, e.g.: * * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000 * * is equivalent to * * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000,24000000 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/platform/cc770.h> #include "cc770.h" #define MAXDEV 8 MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the ISA bus"); MODULE_LICENSE("GPL v2"); #define CLK_DEFAULT 16000000 /* 16 MHz */ #define COR_DEFAULT 0x00 #define BCR_DEFAULT BUSCFG_CBY static unsigned long port[MAXDEV]; static unsigned long mem[MAXDEV]; static int irq[MAXDEV]; static int clk[MAXDEV]; static u8 cir[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; static u8 cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; static u8 bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; static int indirect[MAXDEV] = {[0 ... 
(MAXDEV - 1)] = -1}; module_param_array(port, ulong, NULL, S_IRUGO); MODULE_PARM_DESC(port, "I/O port number"); module_param_array(mem, ulong, NULL, S_IRUGO); MODULE_PARM_DESC(mem, "I/O memory address"); module_param_array(indirect, int, NULL, S_IRUGO); MODULE_PARM_DESC(indirect, "Indirect access via address and data port"); module_param_array(irq, int, NULL, S_IRUGO); MODULE_PARM_DESC(irq, "IRQ number"); module_param_array(clk, int, NULL, S_IRUGO); MODULE_PARM_DESC(clk, "External oscillator clock frequency " "(default=16000000 [16 MHz])"); module_param_array(cir, byte, NULL, S_IRUGO); MODULE_PARM_DESC(cir, "CPU interface register (default=0x40 [DSC])"); module_param_array(cor, byte, NULL, S_IRUGO); MODULE_PARM_DESC(cor, "Clockout register (default=0x00)"); module_param_array(bcr, byte, NULL, S_IRUGO); MODULE_PARM_DESC(bcr, "Bus configuration register (default=0x40 [CBY])"); #define CC770_IOSIZE 0x20 #define CC770_IOSIZE_INDIRECT 0x02 /* Spinlock for cc770_isa_port_write_reg_indirect * and cc770_isa_port_read_reg_indirect */ static DEFINE_SPINLOCK(cc770_isa_port_lock); static struct platform_device *cc770_isa_devs[MAXDEV]; static u8 cc770_isa_mem_read_reg(const struct cc770_priv *priv, int reg) { return readb(priv->reg_base + reg); } static void cc770_isa_mem_write_reg(const struct cc770_priv *priv, int reg, u8 val) { writeb(val, priv->reg_base + reg); } static u8 cc770_isa_port_read_reg(const struct cc770_priv *priv, int reg) { return inb((unsigned long)priv->reg_base + reg); } static void cc770_isa_port_write_reg(const struct cc770_priv *priv, int reg, u8 val) { outb(val, (unsigned long)priv->reg_base + reg); } static u8 cc770_isa_port_read_reg_indirect(const struct cc770_priv *priv, int reg) { unsigned long base = (unsigned long)priv->reg_base; unsigned long flags; u8 val; spin_lock_irqsave(&cc770_isa_port_lock, flags); outb(reg, base); val = inb(base + 1); spin_unlock_irqrestore(&cc770_isa_port_lock, flags); return val; } static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv, int reg, u8 val) { unsigned long base = (unsigned long)priv->reg_base; unsigned long flags; spin_lock_irqsave(&cc770_isa_port_lock, flags); outb(reg, base); outb(val, base + 1); spin_unlock_irqrestore(&cc770_isa_port_lock, flags); } static int cc770_isa_probe(struct platform_device *pdev) { struct net_device *dev; struct cc770_priv *priv; void __iomem *base = NULL; int iosize = CC770_IOSIZE; int idx = pdev->id; int err; u32 clktmp; dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n", idx, port[idx], mem[idx], irq[idx]); if (mem[idx]) { if (!request_mem_region(mem[idx], iosize, KBUILD_MODNAME)) { err = -EBUSY; goto exit; } base = ioremap_nocache(mem[idx], iosize); if (!base) { err = -ENOMEM; goto exit_release; } } else { if (indirect[idx] > 0 || (indirect[idx] == -1 && indirect[0] > 0)) iosize = CC770_IOSIZE_INDIRECT; if (!request_region(port[idx], iosize, KBUILD_MODNAME)) { err = -EBUSY; goto exit; } } dev = alloc_cc770dev(0); if (!dev) { err = -ENOMEM; goto exit_unmap; } priv = netdev_priv(dev); dev->irq = irq[idx]; priv->irq_flags = IRQF_SHARED; if (mem[idx]) { priv->reg_base = base; dev->base_addr = mem[idx]; priv->read_reg = cc770_isa_mem_read_reg; priv->write_reg = cc770_isa_mem_write_reg; } else { priv->reg_base = (void __iomem *)port[idx]; dev->base_addr = port[idx]; if (iosize == CC770_IOSIZE_INDIRECT) { priv->read_reg = cc770_isa_port_read_reg_indirect; priv->write_reg = cc770_isa_port_write_reg_indirect; } else { priv->read_reg = cc770_isa_port_read_reg; 
priv->write_reg = cc770_isa_port_write_reg; } } if (clk[idx]) clktmp = clk[idx]; else if (clk[0]) clktmp = clk[0]; else clktmp = CLK_DEFAULT; priv->can.clock.freq = clktmp; if (cir[idx] != 0xff) { priv->cpu_interface = cir[idx]; } else if (cir[0] != 0xff) { priv->cpu_interface = cir[0]; } else { /* The system clock may not exceed 10 MHz */ if (clktmp > 10000000) { priv->cpu_interface |= CPUIF_DSC; clktmp /= 2; } /* The memory clock may not exceed 8 MHz */ if (clktmp > 8000000) priv->cpu_interface |= CPUIF_DMC; } if (priv->cpu_interface & CPUIF_DSC) priv->can.clock.freq /= 2; if (bcr[idx] != 0xff) priv->bus_config = bcr[idx]; else if (bcr[0] != 0xff) priv->bus_config = bcr[0]; else priv->bus_config = BCR_DEFAULT; if (cor[idx] != 0xff) priv->clkout = cor[idx]; else if (cor[0] != 0xff) priv->clkout = cor[0]; else priv->clkout = COR_DEFAULT; platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); err = register_cc770dev(dev); if (err) { dev_err(&pdev->dev, "couldn't register device (err=%d)\n", err); goto exit_unmap; } dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n", priv->reg_base, dev->irq); return 0; exit_unmap: if (mem[idx]) iounmap(base); exit_release: if (mem[idx]) release_mem_region(mem[idx], iosize); else release_region(port[idx], iosize); exit: return err; } static int cc770_isa_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct cc770_priv *priv = netdev_priv(dev); int idx = pdev->id; unregister_cc770dev(dev); if (mem[idx]) { iounmap(priv->reg_base); release_mem_region(mem[idx], CC770_IOSIZE); } else { if (priv->read_reg == cc770_isa_port_read_reg_indirect) release_region(port[idx], CC770_IOSIZE_INDIRECT); else release_region(port[idx], CC770_IOSIZE); } free_cc770dev(dev); return 0; } static struct platform_driver cc770_isa_driver = { .probe = cc770_isa_probe, .remove = cc770_isa_remove, .driver = { .name = KBUILD_MODNAME, }, }; static int __init cc770_isa_init(void) { int idx, err; for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) { if ((port[idx] || mem[idx]) && irq[idx]) { cc770_isa_devs[idx] = platform_device_alloc(KBUILD_MODNAME, idx); if (!cc770_isa_devs[idx]) { err = -ENOMEM; goto exit_free_devices; } err = platform_device_add(cc770_isa_devs[idx]); if (err) { platform_device_put(cc770_isa_devs[idx]); goto exit_free_devices; } pr_debug("platform device %d: port=%#lx, mem=%#lx, " "irq=%d\n", idx, port[idx], mem[idx], irq[idx]); } else if (idx == 0 || port[idx] || mem[idx]) { pr_err("insufficient parameters supplied\n"); err = -EINVAL; goto exit_free_devices; } } err = platform_driver_register(&cc770_isa_driver); if (err) goto exit_free_devices; pr_info("driver for max. %d devices registered\n", MAXDEV); return 0; exit_free_devices: while (--idx >= 0) { if (cc770_isa_devs[idx]) platform_device_unregister(cc770_isa_devs[idx]); } return err; } module_init(cc770_isa_init); static void __exit cc770_isa_exit(void) { int idx; platform_driver_unregister(&cc770_isa_driver); for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) { if (cc770_isa_devs[idx]) platform_device_unregister(cc770_isa_devs[idx]); } } module_exit(cc770_isa_exit);
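// A standalone C++ sketch of the indirect-access idiom implemented by
// cc770_isa_port_read_reg_indirect()/..._write_reg_indirect() above: one
// port latches a register index, the adjacent port transfers the data, and
// a lock keeps the two-step sequence atomic (the role cc770_isa_port_lock
// plays). The in-memory register file here is a fake device for
// illustration; real hardware would use outb()/inb().
#include <array>
#include <cassert>
#include <cstdint>
#include <mutex>

class IndirectDevice {
 public:
  uint8_t Read(uint8_t reg) {
    std::lock_guard<std::mutex> lock(lock_);
    latched_ = reg & 0x1f;   // outb(reg, base): select the register
    return regs_[latched_];  // inb(base + 1): read its value
  }
  void Write(uint8_t reg, uint8_t val) {
    std::lock_guard<std::mutex> lock(lock_);
    latched_ = reg & 0x1f;   // outb(reg, base)
    regs_[latched_] = val;   // outb(val, base + 1)
  }

 private:
  std::mutex lock_;
  std::array<uint8_t, 0x20> regs_{};  // CC770 exposes a 0x20-byte window
  uint8_t latched_ = 0;
};

int main() {
  IndirectDevice dev;
  dev.Write(0x1f, 0xab);
  assert(dev.Read(0x1f) == 0xab);
  return 0;
}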
null
null
null
null
92,885
15,194
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
180,189
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2013 Cavium, Inc. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <uapi/asm/bitfield.h> #include <asm/byteorder.h> #include <asm/io.h> #define PCI_CONFIG_ADDRESS 0xcf8 #define PCI_CONFIG_DATA 0xcfc union pci_config_address { struct { __BITFIELD_FIELD(unsigned enable_bit : 1, /* 31 */ __BITFIELD_FIELD(unsigned reserved : 7, /* 30 .. 24 */ __BITFIELD_FIELD(unsigned bus_number : 8, /* 23 .. 16 */ __BITFIELD_FIELD(unsigned devfn_number : 8, /* 15 .. 8 */ __BITFIELD_FIELD(unsigned register_number : 8, /* 7 .. 0 */ ))))); }; u32 w; }; int pcibios_plat_dev_init(struct pci_dev *dev) { return 0; } int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return ((pin + slot) % 4)+ MIPS_IRQ_PCIA; } static void pci_virtio_guest_write_config_addr(struct pci_bus *bus, unsigned int devfn, int reg) { union pci_config_address pca = { .w = 0 }; pca.register_number = reg; pca.devfn_number = devfn; pca.bus_number = bus->number; pca.enable_bit = 1; outl(pca.w, PCI_CONFIG_ADDRESS); } static int pci_virtio_guest_write_config(struct pci_bus *bus, unsigned int devfn, int reg, int size, u32 val) { pci_virtio_guest_write_config_addr(bus, devfn, reg); switch (size) { case 1: outb(val, PCI_CONFIG_DATA + (reg & 3)); break; case 2: outw(val, PCI_CONFIG_DATA + (reg & 2)); break; case 4: outl(val, PCI_CONFIG_DATA); break; } return PCIBIOS_SUCCESSFUL; } static int pci_virtio_guest_read_config(struct pci_bus *bus, unsigned int devfn, int reg, int size, u32 *val) { pci_virtio_guest_write_config_addr(bus, devfn, reg); switch (size) { case 1: *val = inb(PCI_CONFIG_DATA + (reg & 3)); break; case 2: *val = inw(PCI_CONFIG_DATA + (reg & 2)); break; case 4: *val = inl(PCI_CONFIG_DATA); break; } return PCIBIOS_SUCCESSFUL; } static struct pci_ops pci_virtio_guest_ops = { .read = pci_virtio_guest_read_config, .write = pci_virtio_guest_write_config, }; static struct resource pci_virtio_guest_mem_resource = { .name = "Virtio MEM", .flags = IORESOURCE_MEM, .start = 0x10000000, .end = 0x1dffffff }; static struct resource pci_virtio_guest_io_resource = { .name = "Virtio IO", .flags = IORESOURCE_IO, .start = 0, .end = 0xffff }; static struct pci_controller pci_virtio_guest_controller = { .pci_ops = &pci_virtio_guest_ops, .mem_resource = &pci_virtio_guest_mem_resource, .io_resource = &pci_virtio_guest_io_resource, }; static int __init pci_virtio_guest_setup(void) { pr_err("pci_virtio_guest_setup\n"); /* Virtio comes pre-assigned */ pci_set_flags(PCI_PROBE_ONLY); pci_virtio_guest_controller.io_map_base = mips_io_port_base; register_pci_controller(&pci_virtio_guest_controller); return 0; } arch_initcall(pci_virtio_guest_setup);
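// A standalone C++ sketch of the config-address encoding that the
// pci_config_address union above expresses field by field (PCI
// configuration mechanism #1): bit 31 enable, bits 23..16 bus, bits 15..8
// devfn, bits 7..0 register. Writing this dword to 0xCF8 selects the config
// register; 0xCFC then carries the data, as in the read/write ops above.
#include <cassert>
#include <cstdint>

constexpr uint32_t MakeConfigAddress(uint32_t bus, uint32_t devfn,
                                     uint32_t reg) {
  return (1u << 31)                // enable_bit
         | ((bus & 0xFFu) << 16)   // bus_number
         | ((devfn & 0xFFu) << 8)  // devfn_number
         | (reg & 0xFFu);          // register_number
}

int main() {
  // Bus 0, device 3 function 0 (devfn = dev << 3 | fn), register 0
  // (the vendor/device ID dword).
  static_assert(MakeConfigAddress(0, 3 << 3, 0) == 0x80001800u, "");
  assert(MakeConfigAddress(1, 0, 4) == 0x80010004u);
  return 0;
}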
null
null
null
null
88,536
43,452
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
208,447
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * linux/can.h * * Definitions for CAN network layer (socket addr / CAN frame / CAN filter) * * Authors: Oliver Hartkopp <oliver.hartkopp@volkswagen.de> * Urs Thuermann <urs.thuermann@volkswagen.de> * Copyright (c) 2002-2007 Volkswagen Group Electronic Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Volkswagen nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Alternatively, provided that this notice is retained in full, this * software may be distributed under the terms of the GNU General * Public License ("GPL") version 2, in which case the provisions of the * GPL apply INSTEAD OF those given above. * * The provided data structures and external interfaces from this code * are not restricted to be used by modules with a GPL compatible license. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. 
*/ #ifndef _UAPI_CAN_H #define _UAPI_CAN_H #include <linux/types.h> #include <linux/socket.h> /* controller area network (CAN) kernel definitions */ /* special address description flags for the CAN_ID */ #define CAN_EFF_FLAG 0x80000000U /* EFF/SFF is set in the MSB */ #define CAN_RTR_FLAG 0x40000000U /* remote transmission request */ #define CAN_ERR_FLAG 0x20000000U /* error message frame */ /* valid bits in CAN ID for frame formats */ #define CAN_SFF_MASK 0x000007FFU /* standard frame format (SFF) */ #define CAN_EFF_MASK 0x1FFFFFFFU /* extended frame format (EFF) */ #define CAN_ERR_MASK 0x1FFFFFFFU /* omit EFF, RTR, ERR flags */ /* * Controller Area Network Identifier structure * * bit 0-28 : CAN identifier (11/29 bit) * bit 29 : error message frame flag (0 = data frame, 1 = error message) * bit 30 : remote transmission request flag (1 = rtr frame) * bit 31 : frame format flag (0 = standard 11 bit, 1 = extended 29 bit) */ typedef __u32 canid_t; #define CAN_SFF_ID_BITS 11 #define CAN_EFF_ID_BITS 29 /* * Controller Area Network Error Message Frame Mask structure * * bit 0-28 : error class mask (see include/linux/can/error.h) * bit 29-31 : set to zero */ typedef __u32 can_err_mask_t; /* CAN payload length and DLC definitions according to ISO 11898-1 */ #define CAN_MAX_DLC 8 #define CAN_MAX_DLEN 8 /* CAN FD payload length and DLC definitions according to ISO 11898-7 */ #define CANFD_MAX_DLC 15 #define CANFD_MAX_DLEN 64 /** * struct can_frame - basic CAN frame structure * @can_id: CAN ID of the frame and CAN_*_FLAG flags, see canid_t definition * @can_dlc: frame payload length in byte (0 .. 8) aka data length code * N.B. the DLC field from ISO 11898-1 Chapter 8.4.2.3 has a 1:1 * mapping of the 'data length code' to the real payload length * @__pad: padding * @__res0: reserved / padding * @__res1: reserved / padding * @data: CAN frame payload (up to 8 byte) */ struct can_frame { canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */ __u8 can_dlc; /* frame payload length in byte (0 .. CAN_MAX_DLEN) */ __u8 __pad; /* padding */ __u8 __res0; /* reserved / padding */ __u8 __res1; /* reserved / padding */ __u8 data[CAN_MAX_DLEN] __attribute__((aligned(8))); }; /* * defined bits for canfd_frame.flags * * The use of struct canfd_frame implies the Extended Data Length (EDL) bit to * be set in the CAN frame bitstream on the wire. The EDL bit switch turns * the CAN controllers bitstream processor into the CAN FD mode which creates * two new options within the CAN FD frame specification: * * Bit Rate Switch - to indicate a second bitrate is/was used for the payload * Error State Indicator - represents the error state of the transmitting node * * As the CANFD_ESI bit is internally generated by the transmitting CAN * controller only the CANFD_BRS bit is relevant for real CAN controllers when * building a CAN FD frame for transmission. Setting the CANFD_ESI bit can make * sense for virtual CAN interfaces to test applications with echoed frames. */ #define CANFD_BRS 0x01 /* bit rate switch (second bitrate for payload data) */ #define CANFD_ESI 0x02 /* error state indicator of the transmitting node */ /** * struct canfd_frame - CAN flexible data rate frame structure * @can_id: CAN ID of the frame and CAN_*_FLAG flags, see canid_t definition * @len: frame payload length in byte (0 .. 
CANFD_MAX_DLEN) * @flags: additional flags for CAN FD * @__res0: reserved / padding * @__res1: reserved / padding * @data: CAN FD frame payload (up to CANFD_MAX_DLEN byte) */ struct canfd_frame { canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */ __u8 len; /* frame payload length in byte */ __u8 flags; /* additional flags for CAN FD */ __u8 __res0; /* reserved / padding */ __u8 __res1; /* reserved / padding */ __u8 data[CANFD_MAX_DLEN] __attribute__((aligned(8))); }; #define CAN_MTU (sizeof(struct can_frame)) #define CANFD_MTU (sizeof(struct canfd_frame)) /* particular protocols of the protocol family PF_CAN */ #define CAN_RAW 1 /* RAW sockets */ #define CAN_BCM 2 /* Broadcast Manager */ #define CAN_TP16 3 /* VAG Transport Protocol v1.6 */ #define CAN_TP20 4 /* VAG Transport Protocol v2.0 */ #define CAN_MCNET 5 /* Bosch MCNet */ #define CAN_ISOTP 6 /* ISO 15765-2 Transport Protocol */ #define CAN_NPROTO 7 #define SOL_CAN_BASE 100 /** * struct sockaddr_can - the sockaddr structure for CAN sockets * @can_family: address family number AF_CAN. * @can_ifindex: CAN network interface index. * @can_addr: protocol specific address information */ struct sockaddr_can { __kernel_sa_family_t can_family; int can_ifindex; union { /* transport protocol class address information (e.g. ISOTP) */ struct { canid_t rx_id, tx_id; } tp; /* reserved for future CAN protocols address information */ } can_addr; }; /** * struct can_filter - CAN ID based filter in can_register(). * @can_id: relevant bits of CAN ID which are not masked out. * @can_mask: CAN mask (see description) * * Description: * A filter matches, when * * <received_can_id> & mask == can_id & mask * * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can * filter for error message frames (CAN_ERR_FLAG bit set in mask). */ struct can_filter { canid_t can_id; canid_t can_mask; }; #define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */ #define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */ #endif /* !_UAPI_CAN_H */
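// A standalone C++ sketch of the filter rule documented for struct
// can_filter above: a frame matches when
//   <received_can_id> & mask == can_id & mask.
// The constant mirrors CAN_SFF_MASK (0x7FF) from the header; the struct and
// function names are illustrative, not kernel API.
#include <cassert>
#include <cstdint>

using canid_t = uint32_t;
constexpr canid_t kSffMask = 0x7FF;  // CAN_SFF_MASK

struct Filter {
  canid_t can_id;
  canid_t can_mask;
};

bool Matches(const Filter& f, canid_t received) {
  return (received & f.can_mask) == (f.can_id & f.can_mask);
}

int main() {
  // Accept exactly standard-frame ID 0x123: mask covers all 11 SFF bits.
  Filter exact{0x123, kSffMask};
  assert(Matches(exact, 0x123));
  assert(!Matches(exact, 0x124));

  // Accept any ID in 0x120..0x12F: mask out the low nibble.
  Filter range{0x120, 0x7F0};
  assert(Matches(range, 0x12A));
  assert(!Matches(range, 0x130));
  return 0;
}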
null
null
null
null
116,794
11,332
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
11,332
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "gpu/command_buffer/client/client_context_state.h" #include "base/logging.h" namespace gpu { namespace gles2 { ClientContextState::ClientContextState() = default; ClientContextState::~ClientContextState() = default; void ClientContextState::SetViewport( GLint x, GLint y, GLsizei width, GLsizei height) { DCHECK_LE(0, width); DCHECK_LE(0, height); viewport_x = x; viewport_y = y; viewport_width = width; viewport_height = height; } // Include the auto-generated part of this file. We split this because it means // we can easily edit the non-auto generated parts right here in this file // instead of having to edit some template or the code generator. #include "gpu/command_buffer/client/client_context_state_impl_autogen.h" } // namespace gles2 } // namespace gpu
null
null
null
null
8,195
4,343
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
4,343
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef IOS_CHROME_BROWSER_SYNC_IOS_CHROME_PROFILE_SYNC_SERVICE_FACTORY_H_ #define IOS_CHROME_BROWSER_SYNC_IOS_CHROME_PROFILE_SYNC_SERVICE_FACTORY_H_ #include <memory> #include "base/macros.h" #include "components/keyed_service/ios/browser_state_keyed_service_factory.h" namespace base { template <typename T> struct DefaultSingletonTraits; } // namespace base namespace browser_sync { class ProfileSyncService; } // namespace browser_sync namespace ios { class ChromeBrowserState; } // namespace ios // Singleton that owns all ProfileSyncService and associates them with // ios::ChromeBrowserState. class IOSChromeProfileSyncServiceFactory : public BrowserStateKeyedServiceFactory { public: static browser_sync::ProfileSyncService* GetForBrowserState( ios::ChromeBrowserState* browser_state); static browser_sync::ProfileSyncService* GetForBrowserStateIfExists( ios::ChromeBrowserState* browser_state); static IOSChromeProfileSyncServiceFactory* GetInstance(); private: friend struct base::DefaultSingletonTraits< IOSChromeProfileSyncServiceFactory>; IOSChromeProfileSyncServiceFactory(); ~IOSChromeProfileSyncServiceFactory() override; // BrowserStateKeyedServiceFactory implementation. std::unique_ptr<KeyedService> BuildServiceInstanceFor( web::BrowserState* context) const override; }; #endif // IOS_CHROME_BROWSER_SYNC_IOS_CHROME_PROFILE_SYNC_SERVICE_FACTORY_H_
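// A minimal standalone C++ sketch of the keyed-service-factory pattern the
// header above declares: a singleton factory owning at most one service per
// context, created lazily by GetForBrowserState() and merely looked up by
// GetForBrowserStateIfExists(). All names here are illustrative stand-ins,
// not the real KeyedService machinery.
#include <cassert>
#include <map>
#include <memory>

struct BrowserState {};  // stand-in for ios::ChromeBrowserState
class Service {};        // stand-in for browser_sync::ProfileSyncService

class ServiceFactory {
 public:
  static ServiceFactory* GetInstance() {
    static ServiceFactory instance;  // function-local singleton
    return &instance;
  }

  // Lazily builds the per-context service on first use.
  Service* GetForBrowserState(BrowserState* state) {
    std::unique_ptr<Service>& slot = services_[state];
    if (!slot)
      slot = std::make_unique<Service>();
    return slot.get();
  }

  // Pure lookup: returns null if the service was never created.
  Service* GetForBrowserStateIfExists(BrowserState* state) {
    auto it = services_.find(state);
    return it == services_.end() ? nullptr : it->second.get();
  }

 private:
  std::map<BrowserState*, std::unique_ptr<Service>> services_;
};

int main() {
  BrowserState a, b;
  ServiceFactory* factory = ServiceFactory::GetInstance();
  assert(factory->GetForBrowserStateIfExists(&a) == nullptr);
  Service* sa = factory->GetForBrowserState(&a);
  assert(factory->GetForBrowserStateIfExists(&a) == sa);
  assert(factory->GetForBrowserState(&b) != sa);  // one service per context
  return 0;
}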
null
null
null
null
1,206
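For illustration: the header above follows the keyed-service singleton-factory pattern — one process-wide factory, one service per browser state. A minimal sketch of the same shape in plain C++ follows, deliberately avoiding the Chromium base/ machinery; all names are hypothetical, and a function-local static stands in for base::DefaultSingletonTraits.

#include <map>
#include <memory>

struct BrowserState {};  // stand-in key type
struct SyncService {};   // stand-in service type

class SyncServiceFactory {
 public:
  // Meyers singleton in place of base::DefaultSingletonTraits.
  static SyncServiceFactory* GetInstance() {
    static SyncServiceFactory instance;
    return &instance;
  }

  // Lazily builds one service per browser state, mirroring the
  // GetForBrowserState() accessor above.
  SyncService* GetForState(BrowserState* state) {
    auto& slot = services_[state];
    if (!slot)
      slot = std::make_unique<SyncService>();
    return slot.get();
  }

 private:
  SyncServiceFactory() = default;
  std::map<BrowserState*, std::unique_ptr<SyncService>> services_;
};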
9,580
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
9,580
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/nqe/network_id.h"

#include <string>

#include "base/strings/string_number_conversions.h"
#include "net/base/network_change_notifier.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace net {
namespace nqe {
namespace internal {
namespace {

TEST(NetworkIDTest, TestSerialize) {
  nqe::internal::NetworkID network_id(NetworkChangeNotifier::CONNECTION_2G,
                                      "test1", 2);
  std::string serialized = network_id.ToString();
  EXPECT_EQ(network_id, NetworkID::FromString(serialized));
}

}  // namespace
}  // namespace internal
}  // namespace nqe
}  // namespace net
null
null
null
null
6,443
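For illustration: TestSerialize above is a serialize/parse round-trip check, a pattern that generalizes to any type with paired ToString()/FromString(). A self-contained sketch with a hypothetical toy type follows, using the gtest macros the same way as the file does.

#include <string>
#include "testing/gtest/include/gtest/gtest.h"

struct Endpoint {
  std::string host;
  int port = 0;

  std::string ToString() const { return host + ":" + std::to_string(port); }

  static Endpoint FromString(const std::string& s) {
    size_t colon = s.rfind(':');
    return {s.substr(0, colon), std::stoi(s.substr(colon + 1))};
  }

  bool operator==(const Endpoint& other) const {
    return host == other.host && port == other.port;
  }
};

TEST(EndpointTest, RoundTrip) {
  Endpoint original{"example.test", 443};
  EXPECT_EQ(original, Endpoint::FromString(original.ToString()));
}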
55,717
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
55,717
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/image_decoder.h" #include <utility> #include "base/bind.h" #include "base/callback.h" #include "base/threading/thread_task_runner_handle.h" #include "build/build_config.h" #include "content/public/browser/browser_thread.h" #include "content/public/common/service_manager_connection.h" #include "ipc/ipc_channel.h" #include "services/data_decoder/public/cpp/decode_image.h" #include "services/service_manager/public/cpp/connector.h" #include "third_party/skia/include/core/SkBitmap.h" #include "ui/gfx/geometry/size.h" namespace { const int64_t kMaxImageSizeInBytes = static_cast<int64_t>(IPC::Channel::kMaximumMessageSize); // Note that this is always called on the thread which initiated the // corresponding data_decoder::DecodeImage request. void OnDecodeImageDone( base::Callback<void(int)> fail_callback, base::Callback<void(const SkBitmap&, int)> success_callback, int request_id, const SkBitmap& image) { if (!image.isNull() && !image.empty()) success_callback.Run(image, request_id); else fail_callback.Run(request_id); } void BindToBrowserConnector(service_manager::mojom::ConnectorRequest request) { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); content::ServiceManagerConnection::GetForProcess()->GetConnector() ->BindConnectorRequest(std::move(request)); } void RunDecodeCallbackOnTaskRunner( data_decoder::mojom::ImageDecoder::DecodeImageCallback callback, scoped_refptr<base::SequencedTaskRunner> task_runner, const SkBitmap& image) { task_runner->PostTask(FROM_HERE, base::BindOnce(std::move(callback), image)); } void DecodeImage( std::vector<uint8_t> image_data, data_decoder::mojom::ImageCodec codec, bool shrink_to_fit, const gfx::Size& desired_image_frame_size, data_decoder::mojom::ImageDecoder::DecodeImageCallback callback, scoped_refptr<base::SequencedTaskRunner> callback_task_runner) { DCHECK_CURRENTLY_ON(content::BrowserThread::IO); service_manager::mojom::ConnectorRequest connector_request; std::unique_ptr<service_manager::Connector> connector = service_manager::Connector::Create(&connector_request); content::BrowserThread::PostTask( content::BrowserThread::UI, FROM_HERE, base::BindOnce(&BindToBrowserConnector, std::move(connector_request))); data_decoder::DecodeImage( connector.get(), image_data, codec, shrink_to_fit, kMaxImageSizeInBytes, desired_image_frame_size, base::BindOnce(&RunDecodeCallbackOnTaskRunner, std::move(callback), std::move(callback_task_runner))); } } // namespace ImageDecoder::ImageRequest::ImageRequest() : task_runner_(base::ThreadTaskRunnerHandle::Get()) { DCHECK(sequence_checker_.CalledOnValidSequence()); } ImageDecoder::ImageRequest::ImageRequest( const scoped_refptr<base::SequencedTaskRunner>& task_runner) : task_runner_(task_runner) { DCHECK(sequence_checker_.CalledOnValidSequence()); } ImageDecoder::ImageRequest::~ImageRequest() { DCHECK(sequence_checker_.CalledOnValidSequence()); ImageDecoder::Cancel(this); } // static ImageDecoder* ImageDecoder::GetInstance() { static auto* image_decoder = new ImageDecoder(); return image_decoder; } // static void ImageDecoder::Start(ImageRequest* image_request, std::vector<uint8_t> image_data) { StartWithOptions(image_request, std::move(image_data), DEFAULT_CODEC, false, gfx::Size()); } // static void ImageDecoder::Start(ImageRequest* image_request, const std::string& image_data) { Start(image_request, 
std::vector<uint8_t>(image_data.begin(), image_data.end())); } // static void ImageDecoder::StartWithOptions(ImageRequest* image_request, std::vector<uint8_t> image_data, ImageCodec image_codec, bool shrink_to_fit, const gfx::Size& desired_image_frame_size) { ImageDecoder::GetInstance()->StartWithOptionsImpl( image_request, std::move(image_data), image_codec, shrink_to_fit, desired_image_frame_size); } // static void ImageDecoder::StartWithOptions(ImageRequest* image_request, const std::string& image_data, ImageCodec image_codec, bool shrink_to_fit) { StartWithOptions(image_request, std::vector<uint8_t>(image_data.begin(), image_data.end()), image_codec, shrink_to_fit, gfx::Size()); } ImageDecoder::ImageDecoder() : image_request_id_counter_(0) {} void ImageDecoder::StartWithOptionsImpl( ImageRequest* image_request, std::vector<uint8_t> image_data, ImageCodec image_codec, bool shrink_to_fit, const gfx::Size& desired_image_frame_size) { DCHECK(image_request); DCHECK(image_request->task_runner()); int request_id; { base::AutoLock lock(map_lock_); request_id = image_request_id_counter_++; image_request_id_map_.insert(std::make_pair(request_id, image_request)); } data_decoder::mojom::ImageCodec codec = data_decoder::mojom::ImageCodec::DEFAULT; #if defined(OS_CHROMEOS) if (image_codec == ROBUST_JPEG_CODEC) codec = data_decoder::mojom::ImageCodec::ROBUST_JPEG; if (image_codec == ROBUST_PNG_CODEC) codec = data_decoder::mojom::ImageCodec::ROBUST_PNG; #endif // defined(OS_CHROMEOS) auto callback = base::Bind( &OnDecodeImageDone, base::Bind(&ImageDecoder::OnDecodeImageFailed, base::Unretained(this)), base::Bind(&ImageDecoder::OnDecodeImageSucceeded, base::Unretained(this)), request_id); // NOTE: There exist ImageDecoder consumers which implicitly rely on this // operation happening on a thread which always has a ThreadTaskRunnerHandle. // We arbitrarily use the IO thread here to match details of the legacy // implementation. content::BrowserThread::PostTask( content::BrowserThread::IO, FROM_HERE, base::BindOnce(&DecodeImage, std::move(image_data), codec, shrink_to_fit, desired_image_frame_size, callback, base::WrapRefCounted(image_request->task_runner()))); } // static void ImageDecoder::Cancel(ImageRequest* image_request) { DCHECK(image_request); ImageDecoder::GetInstance()->CancelImpl(image_request); } void ImageDecoder::CancelImpl(ImageRequest* image_request) { base::AutoLock lock(map_lock_); for (auto it = image_request_id_map_.begin(); it != image_request_id_map_.end();) { if (it->second == image_request) { image_request_id_map_.erase(it++); } else { ++it; } } } void ImageDecoder::OnDecodeImageSucceeded( const SkBitmap& decoded_image, int request_id) { ImageRequest* image_request; { base::AutoLock lock(map_lock_); auto it = image_request_id_map_.find(request_id); if (it == image_request_id_map_.end()) return; image_request = it->second; image_request_id_map_.erase(it); } DCHECK(image_request->task_runner()->RunsTasksInCurrentSequence()); image_request->OnImageDecoded(decoded_image); } void ImageDecoder::OnDecodeImageFailed(int request_id) { ImageRequest* image_request; { base::AutoLock lock(map_lock_); auto it = image_request_id_map_.find(request_id); if (it == image_request_id_map_.end()) return; image_request = it->second; image_request_id_map_.erase(it); } DCHECK(image_request->task_runner()->RunsTasksInCurrentSequence()); image_request->OnDecodeImageFailed(); }
null
null
null
null
52,580
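For illustration: the entry points visible above (ImageDecoder::Start(), the nested ImageRequest class, and the OnImageDecoded()/OnDecodeImageFailed() calls made on requests) imply callers that subclass ImageRequest. A hedged usage sketch follows — the exact virtual signatures live in image_decoder.h, so the overrides below are assumptions, and MyAvatarRequest is hypothetical.

#include <string>
#include "chrome/browser/image_decoder.h"
#include "third_party/skia/include/core/SkBitmap.h"

// Assumed to match the virtual declarations in chrome/browser/image_decoder.h.
class MyAvatarRequest : public ImageDecoder::ImageRequest {
 public:
  void OnImageDecoded(const SkBitmap& decoded_image) override {
    // Use the decoded bitmap; invoked on this request's task runner.
  }
  void OnDecodeImageFailed() override {
    // Handle corrupt or oversized image data.
  }
};

// Kicks off sandboxed decoding. The request must outlive the decode or be
// cancelled; per the code above, ~ImageRequest() calls ImageDecoder::Cancel().
void DecodeAvatar(MyAvatarRequest* request, const std::string& png_bytes) {
  ImageDecoder::Start(request, png_bytes);
}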
30,092
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
30,092
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// // File: vk_platform.h // /* ** Copyright (c) 2014-2017 The Khronos Group Inc. ** ** Licensed under the Apache License, Version 2.0 (the "License"); ** you may not use this file except in compliance with the License. ** You may obtain a copy of the License at ** ** http://www.apache.org/licenses/LICENSE-2.0 ** ** Unless required by applicable law or agreed to in writing, software ** distributed under the License is distributed on an "AS IS" BASIS, ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ** See the License for the specific language governing permissions and ** limitations under the License. */ #ifndef VK_PLATFORM_H_ #define VK_PLATFORM_H_ #ifdef __cplusplus extern "C" { #endif // __cplusplus /* *************************************************************************************************** * Platform-specific directives and type declarations *************************************************************************************************** */ /* Platform-specific calling convention macros. * * Platforms should define these so that Vulkan clients call Vulkan commands * with the same calling conventions that the Vulkan implementation expects. * * VKAPI_ATTR - Placed before the return type in function declarations. * Useful for C++11 and GCC/Clang-style function attribute syntax. * VKAPI_CALL - Placed after the return type in function declarations. * Useful for MSVC-style calling convention syntax. * VKAPI_PTR - Placed between the '(' and '*' in function pointer types. * * Function declaration: VKAPI_ATTR void VKAPI_CALL vkCommand(void); * Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void); */ #if defined(_WIN32) // On Windows, Vulkan commands use the stdcall convention #define VKAPI_ATTR #define VKAPI_CALL __stdcall #define VKAPI_PTR VKAPI_CALL #elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7 #error "Vulkan isn't supported for the 'armeabi' NDK ABI" #elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE) // On Android 32-bit ARM targets, Vulkan functions use the "hardfloat" // calling convention, i.e. float parameters are passed in registers. This // is true even if the rest of the application passes floats on the stack, // as it does by default when compiling for the armeabi-v7a NDK ABI. #define VKAPI_ATTR __attribute__((pcs("aapcs-vfp"))) #define VKAPI_CALL #define VKAPI_PTR VKAPI_ATTR #else // On other platforms, use the default calling convention #define VKAPI_ATTR #define VKAPI_CALL #define VKAPI_PTR #endif #include <stddef.h> #if !defined(VK_NO_STDINT_H) #if defined(_MSC_VER) && (_MSC_VER < 1600) typedef signed __int8 int8_t; typedef unsigned __int8 uint8_t; typedef signed __int16 int16_t; typedef unsigned __int16 uint16_t; typedef signed __int32 int32_t; typedef unsigned __int32 uint32_t; typedef signed __int64 int64_t; typedef unsigned __int64 uint64_t; #else #include <stdint.h> #endif #endif // !defined(VK_NO_STDINT_H) #ifdef __cplusplus } // extern "C" #endif // __cplusplus // Platform-specific headers required by platform window system extensions. // These are enabled prior to #including "vulkan.h". The same enable then // controls inclusion of the extension interfaces in vulkan.h. 
#ifdef VK_USE_PLATFORM_ANDROID_KHR #include <android/native_window.h> #endif #ifdef VK_USE_PLATFORM_MIR_KHR #include <mir_toolkit/client_types.h> #endif #ifdef VK_USE_PLATFORM_WAYLAND_KHR #include <wayland-client.h> #endif #ifdef VK_USE_PLATFORM_WIN32_KHR #include <windows.h> #endif #ifdef VK_USE_PLATFORM_XLIB_KHR #include <X11/Xlib.h> #endif #ifdef VK_USE_PLATFORM_XCB_KHR #include <xcb/xcb.h> #endif #endif
null
null
null
null
26,955
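Worked example: the macro-placement comment in the header spells out both forms, so this just instantiates them for a hypothetical vkExampleCommand.

#include "vk_platform.h"

// Function declaration, macros placed as documented in the header:
VKAPI_ATTR void VKAPI_CALL vkExampleCommand(void);

// Matching function pointer type for dynamic loading:
typedef void (VKAPI_PTR *PFN_vkExampleCommand)(void);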
18,030
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
183,025
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright(c) 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * - Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #if !defined(__RVT_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ) #define __RVT_TRACE_TX_H #include <linux/tracepoint.h> #include <linux/trace_seq.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_vt.h> #undef TRACE_SYSTEM #define TRACE_SYSTEM rvt_tx #define wr_opcode_name(opcode) { IB_WR_##opcode, #opcode } #define show_wr_opcode(opcode) \ __print_symbolic(opcode, \ wr_opcode_name(RDMA_WRITE), \ wr_opcode_name(RDMA_WRITE_WITH_IMM), \ wr_opcode_name(SEND), \ wr_opcode_name(SEND_WITH_IMM), \ wr_opcode_name(RDMA_READ), \ wr_opcode_name(ATOMIC_CMP_AND_SWP), \ wr_opcode_name(ATOMIC_FETCH_AND_ADD), \ wr_opcode_name(LSO), \ wr_opcode_name(SEND_WITH_INV), \ wr_opcode_name(RDMA_READ_WITH_INV), \ wr_opcode_name(LOCAL_INV), \ wr_opcode_name(MASKED_ATOMIC_CMP_AND_SWP), \ wr_opcode_name(MASKED_ATOMIC_FETCH_AND_ADD)) #define POS_PRN \ "[%s] wr_id %llx qpn %x psn 0x%x lpsn 0x%x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u" TRACE_EVENT( rvt_post_one_wr, TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe), TP_ARGS(qp, wqe), TP_STRUCT__entry( RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device)) __field(u64, wr_id) __field(u32, qpn) __field(u32, psn) __field(u32, lpsn) __field(u32, length) __field(u32, opcode) __field(u32, size) __field(u32, avail) __field(u32, head) __field(u32, last) ), TP_fast_assign( RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device)) __entry->wr_id = wqe->wr.wr_id; __entry->qpn = qp->ibqp.qp_num; __entry->psn = wqe->psn; __entry->lpsn = wqe->lpsn; __entry->length = wqe->length; __entry->opcode = wqe->wr.opcode; __entry->size = qp->s_size; __entry->avail = qp->s_avail; __entry->head = qp->s_head; __entry->last = qp->s_last; ), TP_printk( POS_PRN, __get_str(dev), __entry->wr_id, __entry->qpn, __entry->psn, __entry->lpsn, __entry->length, __entry->opcode, show_wr_opcode(__entry->opcode), __entry->size, __entry->avail, __entry->head, __entry->last ) ); #endif /* __RVT_TRACE_TX_H */ #undef TRACE_INCLUDE_PATH #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_PATH . #define TRACE_INCLUDE_FILE trace_tx #include <trace/define_trace.h>
null
null
null
null
91,372
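For illustration: per the usual kernel tracepoint machinery, TRACE_EVENT(rvt_post_one_wr, ...) generates a trace_rvt_post_one_wr() hook that the driver calls on its post-send path. A hedged sketch of such a call site follows (kernel C; the surrounding function is hypothetical, the real one lives in the rdmavt post-send code).

/* Hypothetical call site for the event defined above. */
static void example_post_one_wr(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	/* ... queue the work request ... */
	trace_rvt_post_one_wr(qp, wqe);	/* emits qpn, psn, opcode, ring state */
}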
48,824
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
48,824
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef UI_GL_GL_SWITCHES_UTIL_H_
#define UI_GL_GL_SWITCHES_UTIL_H_

#include "ui/gl/gl_export.h"

namespace gl {

GL_EXPORT bool IsPresentationCallbackEnabled();

}  // namespace gl

#endif  // UI_GL_GL_SWITCHES_UTIL_H_
null
null
null
null
45,687
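For illustration, a minimal call site for the single predicate the header exports (the surrounding function is hypothetical):

#include "ui/gl/gl_switches_util.h"

void MaybeWirePresentationFeedback() {
  if (!gl::IsPresentationCallbackEnabled())
    return;  // feature off; skip the callback plumbing
  // ... register the presentation callback here (hypothetical) ...
}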
12,760
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
12,760
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef COMPONENTS_UPDATE_CLIENT_ACTION_RUNNER_H_
#define COMPONENTS_UPDATE_CLIENT_ACTION_RUNNER_H_

#include <stdint.h>

#include <memory>
#include <utility>
#include <vector>

#include "base/callback.h"
#include "base/files/file_path.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/threading/thread_checker.h"
#include "components/update_client/component_unpacker.h"

namespace base {
class CommandLine;
class Process;
class SingleThreadTaskRunner;
}

namespace update_client {

class Component;

class ActionRunner {
 public:
  using Callback =
      base::OnceCallback<void(bool succeeded, int error_code, int extra_code1)>;

  explicit ActionRunner(const Component& component);
  ~ActionRunner();

  void Run(Callback run_complete);

 private:
  void Unpack(std::unique_ptr<service_manager::Connector> connector);
  void UnpackComplete(const ComponentUnpacker::Result& result);
  void RunCommand(const base::CommandLine& cmdline);
  base::CommandLine MakeCommandLine(const base::FilePath& unpack_path) const;
  void WaitForCommand(base::Process process);

  const Component& component_;

  // Used to post callbacks to the main thread.
  scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;

  // Contains the unpack path for the component associated with the run action.
  base::FilePath unpack_path_;

  Callback run_complete_;

  THREAD_CHECKER(thread_checker_);

  DISALLOW_COPY_AND_ASSIGN(ActionRunner);
};

}  // namespace update_client

#endif  // COMPONENTS_UPDATE_CLIENT_ACTION_RUNNER_H_
null
null
null
null
9,623
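For illustration, a hedged usage sketch of Run() with the Callback alias declared above. Where the Component comes from and how the runner is kept alive are the caller's concern and are assumed here.

#include "base/bind.h"
#include "components/update_client/action_runner.h"

// |runner| must outlive the callback; ownership is assumed to be handled by
// the caller (e.g. the update engine keeping it alive until completion).
void RunAction(update_client::ActionRunner* runner) {
  runner->Run(base::BindOnce(
      [](bool succeeded, int error_code, int extra_code1) {
        // Inspect the action's result; codes are component-specific.
      }));
}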
68,755
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
68,755
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef REMOTING_BASE_RUNNING_SAMPLES_H_
#define REMOTING_BASE_RUNNING_SAMPLES_H_

#include <stddef.h>
#include <stdint.h>

#include "base/containers/circular_deque.h"
#include "base/macros.h"
#include "base/threading/thread_checker.h"

namespace remoting {

// Calculates the maximum or average of the most recent N recorded samples.
// This is typically used to smooth out random variation in point samples
// over bandwidth, frame rate, etc.
class RunningSamples {
 public:
  // Constructs a running sample helper that stores |window_size| most
  // recent samples.
  explicit RunningSamples(int window_size);
  virtual ~RunningSamples();

  // Records a point sample.
  void Record(int64_t value);

  // Returns the average over up to |window_size| of the most recent samples.
  // 0 if no sample available
  double Average() const;

  // Returns the max over up to |window_size| of the most recent samples.
  // 0 if no sample available
  int64_t Max() const;

  // Whether there is at least one record.
  bool IsEmpty() const;

 private:
  // Stores the desired window size, as size_t to avoid casting when comparing
  // with the size of |data_points_|.
  const size_t window_size_;

  // Stores the |window_size| most recently recorded samples.
  base::circular_deque<int64_t> data_points_;

  // Holds the sum of the samples in |data_points_|.
  int64_t sum_ = 0;

  base::ThreadChecker thread_checker_;

  DISALLOW_COPY_AND_ASSIGN(RunningSamples);
};

}  // namespace remoting

#endif  // REMOTING_BASE_RUNNING_SAMPLES_H_
null
null
null
null
65,618
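Worked example, grounded directly in the interface above:

#include <cstdint>
#include "remoting/base/running_samples.h"

void Example() {
  remoting::RunningSamples frame_times(3);  // window of the 3 newest samples
  frame_times.Record(10);
  frame_times.Record(20);
  frame_times.Record(30);
  frame_times.Record(40);  // evicts 10; window is now {20, 30, 40}

  double avg = frame_times.Average();  // 30.0
  int64_t max = frame_times.Max();     // 40
  (void)avg;
  (void)max;
}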
14,617
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
14,617
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/history/core/browser/download_types.h" #include <ostream> #include "base/logging.h" #include "components/history/core/browser/download_constants.h" namespace history { DownloadState IntToDownloadState(int state) { switch (static_cast<DownloadState>(state)) { case DownloadState::IN_PROGRESS: case DownloadState::COMPLETE: case DownloadState::CANCELLED: case DownloadState::INTERRUPTED: return static_cast<DownloadState>(state); case DownloadState::INVALID: case DownloadState::BUG_140687: NOTREACHED(); return DownloadState::INVALID; } NOTREACHED(); return DownloadState::INVALID; } int DownloadStateToInt(DownloadState state) { DCHECK_NE(state, DownloadState::INVALID); return static_cast<int>(state); } std::ostream& operator<<(std::ostream& stream, DownloadState state) { switch (state) { case DownloadState::INVALID: return stream << "history::DownloadState::COMPLETE"; case DownloadState::IN_PROGRESS: return stream << "history::DownloadState::IN_PROGRESS"; case DownloadState::COMPLETE: return stream << "history::DownloadState::COMPLETE"; case DownloadState::CANCELLED: return stream << "history::DownloadState::CANCELLED"; case DownloadState::BUG_140687: return stream << "history::DownloadState::BUG_140687"; case DownloadState::INTERRUPTED: return stream << "history::DownloadState::INTERRUPTED"; } NOTREACHED(); return stream; } DownloadDangerType IntToDownloadDangerType(int danger_type) { switch (static_cast<DownloadDangerType>(danger_type)) { case DownloadDangerType::NOT_DANGEROUS: case DownloadDangerType::DANGEROUS_FILE: case DownloadDangerType::DANGEROUS_URL: case DownloadDangerType::DANGEROUS_CONTENT: case DownloadDangerType::MAYBE_DANGEROUS_CONTENT: case DownloadDangerType::UNCOMMON_CONTENT: case DownloadDangerType::USER_VALIDATED: case DownloadDangerType::DANGEROUS_HOST: case DownloadDangerType::POTENTIALLY_UNWANTED: return static_cast<DownloadDangerType>(danger_type); case DownloadDangerType::INVALID: NOTREACHED(); return DownloadDangerType::INVALID; } NOTREACHED(); return DownloadDangerType::INVALID; } int DownloadDangerTypeToInt(DownloadDangerType danger_type) { DCHECK_NE(danger_type, DownloadDangerType::INVALID); return static_cast<int>(danger_type); } std::ostream& operator<<(std::ostream& stream, DownloadDangerType danger_type) { switch (danger_type) { case DownloadDangerType::INVALID: return stream << "history::DownloadDangerType::INVALID"; case DownloadDangerType::NOT_DANGEROUS: return stream << "history::DownloadDangerType::NOT_DANGEROUS"; case DownloadDangerType::DANGEROUS_FILE: return stream << "history::DownloadDangerType::DANGEROUS_FILE"; case DownloadDangerType::DANGEROUS_URL: return stream << "history::DownloadDangerType::DANGEROUS_URL"; case DownloadDangerType::DANGEROUS_CONTENT: return stream << "history::DownloadDangerType::DANGEROUS_CONTENT"; case DownloadDangerType::MAYBE_DANGEROUS_CONTENT: return stream << "history::DownloadDangerType::MAYBE_DANGEROUS_CONTENT"; case DownloadDangerType::UNCOMMON_CONTENT: return stream << "history::DownloadDangerType::UNCOMMON_CONTENT"; case DownloadDangerType::USER_VALIDATED: return stream << "history::DownloadDangerType::USER_VALIDATED"; case DownloadDangerType::DANGEROUS_HOST: return stream << "history::DownloadDangerType::DANGEROUS_HOST"; case DownloadDangerType::POTENTIALLY_UNWANTED: return stream << "history::DownloadDangerType::POTENTIALLY_UNWANTED"; } 
NOTREACHED(); return stream; } DownloadInterruptReason IntToDownloadInterruptReason(int interrupt_reason) { return static_cast<DownloadInterruptReason>(interrupt_reason); } int DownloadInterruptReasonToInt(DownloadInterruptReason interrupt_reason) { return static_cast<int>(interrupt_reason); } const DownloadId kInvalidDownloadId = 0; bool ConvertIntToDownloadId(int64_t id, DownloadId* out) { if (id <= static_cast<int64_t>(kInvalidDownloadId)) return false; *out = static_cast<DownloadId>(id); return true; } int64_t DownloadIdToInt(DownloadId id) { DCHECK_NE(id, kInvalidDownloadId); return static_cast<int64_t>(id); } } // namespace history
null
null
null
null
11,480
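Worked example using only functions defined in the file above:

#include "components/history/core/browser/download_types.h"

void Example() {
  // Enum <-> int round trip survives for every valid state.
  int wire = history::DownloadStateToInt(history::DownloadState::COMPLETE);
  history::DownloadState state = history::IntToDownloadState(wire);
  // state == history::DownloadState::COMPLETE

  // Download IDs: zero and negatives are rejected as invalid.
  history::DownloadId id;
  bool ok = history::ConvertIntToDownloadId(42, &id);  // true, id == 42
  bool bad = history::ConvertIntToDownloadId(0, &id);  // false (kInvalidDownloadId)
  (void)state;
  (void)ok;
  (void)bad;
}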
34,755
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
199,750
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* dvb-usb.h is part of the DVB USB library. * * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) * see dvb-usb-init.c for copyright information. * * the headerfile, all dvb-usb-drivers have to include. * * TODO: clean-up the structures for unused fields and update the comments */ #ifndef __DVB_USB_H__ #define __DVB_USB_H__ #include <linux/input.h> #include <linux/usb.h> #include <linux/firmware.h> #include <linux/mutex.h> #include <media/rc-core.h> #include "dvb_frontend.h" #include "dvb_demux.h" #include "dvb_net.h" #include "dmxdev.h" #include "dvb-pll.h" #include "dvb-usb-ids.h" /* debug */ #ifdef CONFIG_DVB_USB_DEBUG #define dprintk(var,level,args...) \ do { if ((var & level)) { printk(args); } } while (0) #define debug_dump(b,l,func) {\ int loop_; \ for (loop_ = 0; loop_ < l; loop_++) func("%02x ", b[loop_]); \ func("\n");\ } #define DVB_USB_DEBUG_STATUS #else #define dprintk(args...) #define debug_dump(b,l,func) #define DVB_USB_DEBUG_STATUS " (debugging is not enabled)" #endif /* generic log methods - taken from usb.h */ #ifndef DVB_USB_LOG_PREFIX #define DVB_USB_LOG_PREFIX "dvb-usb (please define a log prefix)" #endif #undef err #define err(format, arg...) printk(KERN_ERR DVB_USB_LOG_PREFIX ": " format "\n" , ## arg) #undef info #define info(format, arg...) printk(KERN_INFO DVB_USB_LOG_PREFIX ": " format "\n" , ## arg) #undef warn #define warn(format, arg...) printk(KERN_WARNING DVB_USB_LOG_PREFIX ": " format "\n" , ## arg) /** * struct dvb_usb_device_description - name and its according USB IDs * @name: real name of the box, regardless which DVB USB device class is in use * @cold_ids: array of struct usb_device_id which describe the device in * pre-firmware state * @warm_ids: array of struct usb_device_id which describe the device in * post-firmware state * * Each DVB USB device class can have one or more actual devices, this struct * assigns a name to it. */ struct dvb_usb_device_description { const char *name; #define DVB_USB_ID_MAX_NUM 15 struct usb_device_id *cold_ids[DVB_USB_ID_MAX_NUM]; struct usb_device_id *warm_ids[DVB_USB_ID_MAX_NUM]; }; static inline u8 rc5_custom(struct rc_map_table *key) { return (key->scancode >> 8) & 0xff; } static inline u8 rc5_data(struct rc_map_table *key) { return key->scancode & 0xff; } static inline u16 rc5_scan(struct rc_map_table *key) { return key->scancode & 0xffff; } struct dvb_usb_device; struct dvb_usb_adapter; struct usb_data_stream; /** * Properties of USB streaming - TODO this structure should be somewhere else * describes the kind of USB transfer used for data-streaming. * (BULK or ISOC) */ struct usb_data_stream_properties { #define USB_BULK 1 #define USB_ISOC 2 int type; int count; int endpoint; union { struct { int buffersize; /* per URB */ } bulk; struct { int framesperurb; int framesize; int interval; } isoc; } u; }; /** * struct dvb_usb_adapter_properties - properties of a dvb-usb-adapter. * A DVB-USB-Adapter is basically a dvb_adapter which is present on a USB-device. * @caps: capabilities of the DVB USB device. * @pid_filter_count: number of PID filter position in the optional hardware * PID-filter. * @num_frontends: number of frontends of the DVB USB adapter. * @frontend_ctrl: called to power on/off active frontend. * @streaming_ctrl: called to start and stop the MPEG2-TS streaming of the * device (not URB submitting/killing). * @pid_filter_ctrl: called to en/disable the PID filter, if any. * @pid_filter: called to set/unset a PID for filtering. 
* @frontend_attach: called to attach the possible frontends (fill fe-field * of struct dvb_usb_device). * @tuner_attach: called to attach the correct tuner and to fill pll_addr, * pll_desc and pll_init_buf of struct dvb_usb_device). * @stream: configuration of the USB streaming */ struct dvb_usb_adapter_fe_properties { #define DVB_USB_ADAP_HAS_PID_FILTER 0x01 #define DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF 0x02 #define DVB_USB_ADAP_NEED_PID_FILTERING 0x04 #define DVB_USB_ADAP_RECEIVES_204_BYTE_TS 0x08 #define DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD 0x10 int caps; int pid_filter_count; int (*streaming_ctrl) (struct dvb_usb_adapter *, int); int (*pid_filter_ctrl) (struct dvb_usb_adapter *, int); int (*pid_filter) (struct dvb_usb_adapter *, int, u16, int); int (*frontend_attach) (struct dvb_usb_adapter *); int (*tuner_attach) (struct dvb_usb_adapter *); struct usb_data_stream_properties stream; int size_of_priv; }; #define MAX_NO_OF_FE_PER_ADAP 3 struct dvb_usb_adapter_properties { int size_of_priv; int (*frontend_ctrl) (struct dvb_frontend *, int); int num_frontends; struct dvb_usb_adapter_fe_properties fe[MAX_NO_OF_FE_PER_ADAP]; }; /** * struct dvb_rc_legacy - old properties of remote controller * @rc_map_table: a hard-wired array of struct rc_map_table (NULL to disable * remote control handling). * @rc_map_size: number of items in @rc_map_table. * @rc_query: called to query an event event. * @rc_interval: time in ms between two queries. */ struct dvb_rc_legacy { /* remote control properties */ #define REMOTE_NO_KEY_PRESSED 0x00 #define REMOTE_KEY_PRESSED 0x01 #define REMOTE_KEY_REPEAT 0x02 struct rc_map_table *rc_map_table; int rc_map_size; int (*rc_query) (struct dvb_usb_device *, u32 *, int *); int rc_interval; }; /** * struct dvb_rc properties of remote controller, using rc-core * @rc_codes: name of rc codes table * @protocol: type of protocol(s) currently used by the driver * @allowed_protos: protocol(s) supported by the driver * @driver_type: Used to point if a device supports raw mode * @change_protocol: callback to change protocol * @rc_query: called to query an event event. * @rc_interval: time in ms between two queries. * @bulk_mode: device supports bulk mode for RC (disable polling mode) */ struct dvb_rc { char *rc_codes; u64 protocol; u64 allowed_protos; enum rc_driver_type driver_type; int (*change_protocol)(struct rc_dev *dev, u64 *rc_type); char *module_name; int (*rc_query) (struct dvb_usb_device *d); int rc_interval; bool bulk_mode; /* uses bulk mode */ }; /** * enum dvb_usb_mode - Specifies if it is using a legacy driver or a new one * based on rc-core * This is initialized/used only inside dvb-usb-remote.c. * It shouldn't be set by the drivers. */ enum dvb_usb_mode { DVB_RC_LEGACY, DVB_RC_CORE, }; /** * struct dvb_usb_device_properties - properties of a dvb-usb-device * @usb_ctrl: which USB device-side controller is in use. Needed for firmware * download. * @firmware: name of the firmware file. * @download_firmware: called to download the firmware when the usb_ctrl is * DEVICE_SPECIFIC. * @no_reconnect: device doesn't do a reconnect after downloading the firmware, * so do the warm initialization right after it * * @size_of_priv: how many bytes shall be allocated for the private field * of struct dvb_usb_device. * * @power_ctrl: called to enable/disable power of the device. * @read_mac_address: called to read the MAC address of the device. * @identify_state: called to determine the state (cold or warm), when it * is not distinguishable by the USB IDs. 
* * @rc: remote controller properties * * @i2c_algo: i2c_algorithm if the device has I2CoverUSB. * * @generic_bulk_ctrl_endpoint: most of the DVB USB devices have a generic * endpoint which received control messages with bulk transfers. When this * is non-zero, one can use dvb_usb_generic_rw and dvb_usb_generic_write- * helper functions. * * @generic_bulk_ctrl_endpoint_response: some DVB USB devices use a separate * endpoint for responses to control messages sent with bulk transfers via * the generic_bulk_ctrl_endpoint. When this is non-zero, this will be used * instead of the generic_bulk_ctrl_endpoint when reading usb responses in * the dvb_usb_generic_rw helper function. * * @num_device_descs: number of struct dvb_usb_device_description in @devices * @devices: array of struct dvb_usb_device_description compatibles with these * properties. */ #define MAX_NO_OF_ADAPTER_PER_DEVICE 2 struct dvb_usb_device_properties { #define DVB_USB_IS_AN_I2C_ADAPTER 0x01 int caps; #define DEVICE_SPECIFIC 0 #define CYPRESS_AN2135 1 #define CYPRESS_AN2235 2 #define CYPRESS_FX2 3 int usb_ctrl; int (*download_firmware) (struct usb_device *, const struct firmware *); const char *firmware; int no_reconnect; int size_of_priv; int num_adapters; struct dvb_usb_adapter_properties adapter[MAX_NO_OF_ADAPTER_PER_DEVICE]; int (*power_ctrl) (struct dvb_usb_device *, int); int (*read_mac_address) (struct dvb_usb_device *, u8 []); int (*identify_state) (struct usb_device *, struct dvb_usb_device_properties *, struct dvb_usb_device_description **, int *); struct { enum dvb_usb_mode mode; /* Drivers shouldn't touch on it */ struct dvb_rc_legacy legacy; struct dvb_rc core; } rc; struct i2c_algorithm *i2c_algo; int generic_bulk_ctrl_endpoint; int generic_bulk_ctrl_endpoint_response; int num_device_descs; struct dvb_usb_device_description devices[12]; }; /** * struct usb_data_stream - generic object of an USB stream * @buf_num: number of buffer allocated. * @buf_size: size of each buffer in buf_list. * @buf_list: array containing all allocate buffers for streaming. * @dma_addr: list of dma_addr_t for each buffer in buf_list. * * @urbs_initialized: number of URBs initialized. * @urbs_submitted: number of URBs submitted. */ #define MAX_NO_URBS_FOR_DATA_STREAM 10 struct usb_data_stream { struct usb_device *udev; struct usb_data_stream_properties props; #define USB_STATE_INIT 0x00 #define USB_STATE_URB_BUF 0x01 int state; void (*complete) (struct usb_data_stream *, u8 *, size_t); struct urb *urb_list[MAX_NO_URBS_FOR_DATA_STREAM]; int buf_num; unsigned long buf_size; u8 *buf_list[MAX_NO_URBS_FOR_DATA_STREAM]; dma_addr_t dma_addr[MAX_NO_URBS_FOR_DATA_STREAM]; int urbs_initialized; int urbs_submitted; void *user_priv; }; /** * struct dvb_usb_adapter - a DVB adapter on a USB device * @id: index of this adapter (starting with 0). * * @feedcount: number of reqested feeds (used for streaming-activation) * @pid_filtering: is hardware pid_filtering used or not. * * @pll_addr: I2C address of the tuner for programming * @pll_init: array containing the initialization buffer * @pll_desc: pointer to the appropriate struct dvb_pll_desc * @tuner_pass_ctrl: called to (de)activate tuner passthru of the demod or the board * * @dvb_adap: device's dvb_adapter. * @dmxdev: device's dmxdev. * @demux: device's software demuxer. * @dvb_net: device's dvb_net interfaces. * @dvb_frontend: device's frontend. * @max_feed_count: how many feeds can be handled simultaneously by this * device * * @fe_init: rerouted frontend-init (wakeup) function. 
* @fe_sleep: rerouted frontend-sleep function. * * @stream: the usb data stream. */ struct dvb_usb_fe_adapter { struct dvb_frontend *fe; int (*fe_init) (struct dvb_frontend *); int (*fe_sleep) (struct dvb_frontend *); struct usb_data_stream stream; int pid_filtering; int max_feed_count; void *priv; }; struct dvb_usb_adapter { struct dvb_usb_device *dev; struct dvb_usb_adapter_properties props; #define DVB_USB_ADAP_STATE_INIT 0x000 #define DVB_USB_ADAP_STATE_DVB 0x001 int state; u8 id; int feedcount; /* dvb */ struct dvb_adapter dvb_adap; struct dmxdev dmxdev; struct dvb_demux demux; struct dvb_net dvb_net; struct dvb_usb_fe_adapter fe_adap[MAX_NO_OF_FE_PER_ADAP]; int active_fe; int num_frontends_initialized; void *priv; }; /** * struct dvb_usb_device - object of a DVB USB device * @props: copy of the struct dvb_usb_properties this device belongs to. * @desc: pointer to the device's struct dvb_usb_device_description. * @state: initialization and runtime state of the device. * * @powered: indicated whether the device is power or not. * Powered is in/decremented for each call to modify the state. * @udev: pointer to the device's struct usb_device. * * @data_mutex: mutex to protect the data structure used to store URB data * @usb_mutex: mutex of USB control messages (reading needs two messages). * Please notice that this mutex is used internally at the generic * URB control functions. So, drivers using dvb_usb_generic_rw() and * derivated functions should not lock it internally. * @i2c_mutex: mutex for i2c-transfers * * @i2c_adap: device's i2c_adapter if it uses I2CoverUSB * * @rc_dev: rc device for the remote control (rc-core mode) * @input_dev: input device for the remote control (legacy mode) * @rc_query_work: struct work_struct frequent rc queries * @last_event: last triggered event * @last_state: last state (no, pressed, repeat) * @owner: owner of the dvb_adapter * @priv: private data of the actual driver (allocate by dvb-usb, size defined * in size_of_priv of dvb_usb_properties). 
*/ struct dvb_usb_device { struct dvb_usb_device_properties props; struct dvb_usb_device_description *desc; struct usb_device *udev; #define DVB_USB_STATE_INIT 0x000 #define DVB_USB_STATE_I2C 0x001 #define DVB_USB_STATE_DVB 0x002 #define DVB_USB_STATE_REMOTE 0x004 int state; int powered; /* locking */ struct mutex data_mutex; struct mutex usb_mutex; /* i2c */ struct mutex i2c_mutex; struct i2c_adapter i2c_adap; int num_adapters_initialized; struct dvb_usb_adapter adapter[MAX_NO_OF_ADAPTER_PER_DEVICE]; /* remote control */ struct rc_dev *rc_dev; struct input_dev *input_dev; char rc_phys[64]; struct delayed_work rc_query_work; u32 last_event; int last_state; struct module *owner; void *priv; }; extern int dvb_usb_device_init(struct usb_interface *, struct dvb_usb_device_properties *, struct module *, struct dvb_usb_device **, short *adapter_nums); extern void dvb_usb_device_exit(struct usb_interface *); /* the generic read/write method for device control */ extern int __must_check dvb_usb_generic_rw(struct dvb_usb_device *, u8 *, u16, u8 *, u16, int); extern int __must_check dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16); /* commonly used remote control parsing */ extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *); /* commonly used firmware download types and function */ struct hexline { u8 len; u32 addr; u8 type; u8 data[255]; u8 chk; }; extern int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type); extern int dvb_usb_get_hexline(const struct firmware *fw, struct hexline *hx, int *pos); #endif
null
null
null
null
108,097
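For illustration: a driver fills struct dvb_usb_device_properties with the fields declared above and hands it to dvb_usb_device_init() from its USB probe callback. A hedged sketch in kernel C style follows — every example_-prefixed name, the firmware file name, and the endpoint/buffer numbers are hypothetical, not taken from any real driver.

/* Hypothetical driver description using the fields declared above. */
static struct dvb_usb_device_properties example_props = {
	.usb_ctrl     = CYPRESS_FX2,
	.firmware     = "dvb-usb-example.fw",	/* hypothetical file */
	.num_adapters = 1,
	.adapter = {{
		.num_frontends = 1,
		.fe = {{
			.frontend_attach = example_frontend_attach,	/* hypothetical */
			.tuner_attach    = example_tuner_attach,	/* hypothetical */
			.stream = {
				.type     = USB_BULK,
				.count    = 7,
				.endpoint = 0x02,
				.u.bulk.buffersize = 4096,
			},
		}},
	}},
	.num_device_descs = 1,
	.devices = {
		{ .name = "Example DVB-USB stick" },
	},
};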
18,381
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
18,381
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef COMPONENTS_DOWNLOAD_INTERNAL_BACKGROUND_SERVICE_CONTROLLER_H_
#define COMPONENTS_DOWNLOAD_INTERNAL_BACKGROUND_SERVICE_CONTROLLER_H_

#include <string>

#include "base/macros.h"
#include "components/download/public/background_service/clients.h"
#include "components/download/public/background_service/download_service.h"
#include "components/download/public/background_service/download_task_types.h"

namespace download {

struct DownloadParams;
struct SchedulingParams;

// The type of completion when the download entry transits to complete state.
// TODO(xingliu): Implement timeout and unknown failure types.
enum class CompletionType {
  // The download is successfully finished.
  SUCCEED = 0,

  // The download is interrupted and failed.
  FAIL = 1,

  // The download is aborted by the client.
  ABORT = 2,

  // The download is timed out and the connection is closed.
  TIMEOUT = 3,

  // The download is failed for unknown reasons.
  UNKNOWN = 4,

  // The download is cancelled by the client.
  CANCEL = 5,

  // The download expended its number of expensive retries.
  OUT_OF_RETRIES = 6,

  // The download expended its number of 'free' retries.
  OUT_OF_RESUMPTIONS = 7,

  // The upload was timed out due to an unresponsive client.
  UPLOAD_TIMEOUT = 8,

  // The count of entries for the enum.
  COUNT = 9,
};

// The core Controller responsible for gluing various DownloadService
// components together to manage the active downloads.
class Controller {
 public:
  enum class State {
    // The Controller has been created but has not been initialized yet. It
    // cannot be used.
    CREATED = 1,

    // The Controller has been created and Initialize() has been called but has
    // not yet finished. It cannot be used.
    INITIALIZING = 2,

    // The Controller has been created and initialized. It can be used.
    READY = 3,

    // The Controller failed to initialize and is in the process of recovering.
    // It cannot be used.
    RECOVERING = 4,

    // The Controller was unable to recover and is unusable this session.
    UNAVAILABLE = 5,
  };

  Controller() = default;
  virtual ~Controller() = default;

  // Initializes the controller. Initialization may be asynchronous.
  virtual void Initialize(const base::Closure& callback) = 0;

  // Returns the status of Controller.
  virtual State GetState() = 0;

  // Starts a download with |params|. See DownloadParams::StartCallback and
  // DownloadParams::StartResponse for information on how a caller can
  // determine whether or not the download was successfully accepted and
  // queued.
  virtual void StartDownload(const DownloadParams& params) = 0;

  // Pauses a download request associated with |guid| if one exists.
  virtual void PauseDownload(const std::string& guid) = 0;

  // Resumes a download request associated with |guid| if one exists. The
  // download request may or may not start downloading at this time, but it
  // will no longer be blocked by any prior PauseDownload() actions.
  virtual void ResumeDownload(const std::string& guid) = 0;

  // Cancels a download request associated with |guid| if one exists.
  virtual void CancelDownload(const std::string& guid) = 0;

  // Changes the SchedulingParams of a download request associated with |guid|
  // to |params|.
  virtual void ChangeDownloadCriteria(const std::string& guid,
                                      const SchedulingParams& params) = 0;

  // Exposes the owner of the download request for |guid| if one exists.
  // Otherwise returns DownloadClient::INVALID for an unowned entry.
  virtual DownloadClient GetOwnerOfDownload(const std::string& guid) = 0;

  // See DownloadService::OnStartScheduledTask.
  virtual void OnStartScheduledTask(DownloadTaskType task_type,
                                    const TaskFinishedCallback& callback) = 0;

  // See DownloadService::OnStopScheduledTask.
  virtual bool OnStopScheduledTask(DownloadTaskType task_type) = 0;

 private:
  DISALLOW_COPY_AND_ASSIGN(Controller);
};

}  // namespace download

#endif  // COMPONENTS_DOWNLOAD_INTERNAL_BACKGROUND_SERVICE_CONTROLLER_H_
null
null
null
null
15,244
61,725
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
61,725
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/ssl/certificate_error_reporter.h" #include <stdint.h> #include <string.h> #include <set> #include <string> #include "base/bind.h" #include "base/bind_helpers.h" #include "base/macros.h" #include "base/message_loop/message_loop.h" #include "base/run_loop.h" #include "base/strings/string_piece.h" #include "base/test/bind_test_util.h" #include "components/encrypted_messages/encrypted_message.pb.h" #include "components/encrypted_messages/message_encrypter.h" #include "content/public/common/weak_wrapper_shared_url_loader_factory.h" #include "net/http/http_status_code.h" #include "net/traffic_annotation/network_traffic_annotation_test_helper.h" #include "services/network/public/cpp/resource_request.h" #include "services/network/test/test_url_loader_factory.h" #include "testing/gtest/include/gtest/gtest.h" #include "third_party/boringssl/src/include/openssl/curve25519.h" namespace { static const char kHkdfLabel[] = "certificate report"; const char kDummyHttpReportUri[] = "http://example.test"; const char kDummyHttpsReportUri[] = "https://example.test"; const char kDummyReport[] = "a dummy report"; const uint32_t kServerPublicKeyTestVersion = 16; class ErrorReporterTest : public ::testing::Test { public: ErrorReporterTest() : test_shared_loader_factory_( base::MakeRefCounted<content::WeakWrapperSharedURLLoaderFactory>( &test_url_loader_factory_)) { memset(server_private_key_, 1, sizeof(server_private_key_)); X25519_public_from_private(server_public_key_, server_private_key_); } ~ErrorReporterTest() override {} protected: base::MessageLoopForIO loop_; uint8_t server_public_key_[32]; uint8_t server_private_key_[32]; network::TestURLLoaderFactory test_url_loader_factory_; scoped_refptr<network::SharedURLLoaderFactory> test_shared_loader_factory_; DISALLOW_COPY_AND_ASSIGN(ErrorReporterTest); }; // Test that ErrorReporter::SendExtendedReportingReport sends // an encrypted or plaintext extended reporting report as appropriate. TEST_F(ErrorReporterTest, ExtendedReportingSendReport) { GURL latest_report_uri; std::string latest_report; std::string latest_content_type; test_url_loader_factory_.SetInterceptor( base::BindLambdaForTesting([&](const network::ResourceRequest& request) { latest_report_uri = request.url; request.headers.GetHeader(net::HttpRequestHeaders::kContentType, &latest_content_type); auto body = request.request_body; CHECK_EQ(1u, body->elements()->size()); auto& element = body->elements()->at(0); CHECK_EQ(network::DataElement::TYPE_BYTES, element.type()); latest_report = std::string(element.bytes(), element.length()); })); // Data should not be encrypted when sent to an HTTPS URL. const GURL https_url(kDummyHttpsReportUri); CertificateErrorReporter https_reporter(test_shared_loader_factory_, https_url, server_public_key_, kServerPublicKeyTestVersion); https_reporter.SendExtendedReportingReport( kDummyReport, base::OnceCallback<void()>(), base::OnceCallback<void(int, int)>()); EXPECT_EQ(latest_report_uri, https_url); EXPECT_EQ(latest_report, kDummyReport); // Data should be encrypted when sent to an HTTP URL. 
const GURL http_url(kDummyHttpReportUri); CertificateErrorReporter http_reporter(test_shared_loader_factory_, http_url, server_public_key_, kServerPublicKeyTestVersion); http_reporter.SendExtendedReportingReport( kDummyReport, base::OnceCallback<void()>(), base::OnceCallback<void(int, int)>()); EXPECT_EQ(latest_report_uri, http_url); EXPECT_EQ("application/octet-stream", latest_content_type); std::string uploaded_report; encrypted_messages::EncryptedMessage encrypted_report; ASSERT_TRUE(encrypted_report.ParseFromString(latest_report)); EXPECT_EQ(kServerPublicKeyTestVersion, encrypted_report.server_public_key_version()); EXPECT_EQ( encrypted_messages::EncryptedMessage::AEAD_ECDH_AES_128_CTR_HMAC_SHA256, encrypted_report.algorithm()); // TODO(estark): kHkdfLabel needs to include the null character in the label // due to a matching error in the server for the case of certificate // reporting, the strlen + 1 can be removed once that error is fixed. // https://crbug.com/517746 ASSERT_TRUE(encrypted_messages::DecryptMessageForTesting( server_private_key_, base::StringPiece(kHkdfLabel, strlen(kHkdfLabel) + 1), encrypted_report, &uploaded_report)); EXPECT_EQ(kDummyReport, uploaded_report); } // Tests that an UMA histogram is recorded if a report fails to send. TEST_F(ErrorReporterTest, ErroredRequestCallsCallback) { base::RunLoop run_loop; const GURL report_uri("http://foo.com/bar"); test_url_loader_factory_.AddResponse( report_uri, network::ResourceResponseHead(), std::string(), network::URLLoaderCompletionStatus(net::ERR_CONNECTION_FAILED)); CertificateErrorReporter reporter(test_shared_loader_factory_, report_uri); reporter.SendExtendedReportingReport( kDummyReport, base::BindLambdaForTesting([&]() { FAIL(); }), base::BindLambdaForTesting( [&](int net_error, int http_response_code) { run_loop.Quit(); })); run_loop.Run(); } // Tests that an UMA histogram is recorded if a report is successfully sent. TEST_F(ErrorReporterTest, SuccessfulRequestCallsCallback) { base::RunLoop run_loop; const GURL report_uri("http://foo.com/bar"); test_url_loader_factory_.AddResponse(report_uri.spec(), "some data"); CertificateErrorReporter reporter(test_shared_loader_factory_, report_uri); reporter.SendExtendedReportingReport( kDummyReport, base::BindLambdaForTesting([&]() { run_loop.Quit(); }), base::BindLambdaForTesting( [&](int net_error, int http_response_code) { FAIL(); })); run_loop.Run(); } } // namespace
null
null
null
null
58,588
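For illustration: the tests above capture outgoing reports with network::TestURLLoaderFactory::SetInterceptor(). The same capture pattern, reduced to its core using the calls that appear verbatim in the file; only the lambda body is trimmed to the URL capture.

network::TestURLLoaderFactory factory;
GURL last_url;
factory.SetInterceptor(
    base::BindLambdaForTesting([&](const network::ResourceRequest& request) {
      last_url = request.url;  // record where the code under test reported
    }));
// ... exercise the reporter, then assert on |last_url| ...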
1,917
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
166,912
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/workqueue.h> #include <linux/rtnetlink.h> #include <linux/cache.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/idr.h> #include <linux/rculist.h> #include <linux/nsproxy.h> #include <linux/fs.h> #include <linux/proc_ns.h> #include <linux/file.h> #include <linux/export.h> #include <linux/user_namespace.h> #include <linux/net_namespace.h> #include <linux/sched/task.h> #include <net/sock.h> #include <net/netlink.h> #include <net/net_namespace.h> #include <net/netns/generic.h> /* * Our network namespace constructor/destructor lists */ static LIST_HEAD(pernet_list); static struct list_head *first_device = &pernet_list; DEFINE_MUTEX(net_mutex); LIST_HEAD(net_namespace_list); EXPORT_SYMBOL_GPL(net_namespace_list); struct net init_net = { .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head), }; EXPORT_SYMBOL(init_net); static bool init_net_initialized; #define MIN_PERNET_OPS_ID \ ((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *)) #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */ static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS; static struct net_generic *net_alloc_generic(void) { struct net_generic *ng; unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]); ng = kzalloc(generic_size, GFP_KERNEL); if (ng) ng->s.len = max_gen_ptrs; return ng; } static int net_assign_generic(struct net *net, unsigned int id, void *data) { struct net_generic *ng, *old_ng; BUG_ON(!mutex_is_locked(&net_mutex)); BUG_ON(id < MIN_PERNET_OPS_ID); old_ng = rcu_dereference_protected(net->gen, lockdep_is_held(&net_mutex)); if (old_ng->s.len > id) { old_ng->ptr[id] = data; return 0; } ng = net_alloc_generic(); if (ng == NULL) return -ENOMEM; /* * Some synchronisation notes: * * The net_generic explores the net->gen array inside rcu * read section. Besides once set the net->gen->ptr[x] * pointer never changes (see rules in netns/generic.h). * * That said, we simply duplicate this array and schedule * the old copy for kfree after a grace period. 
*/ memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID], (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *)); ng->ptr[id] = data; rcu_assign_pointer(net->gen, ng); kfree_rcu(old_ng, s.rcu); return 0; } static int ops_init(const struct pernet_operations *ops, struct net *net) { int err = -ENOMEM; void *data = NULL; if (ops->id && ops->size) { data = kzalloc(ops->size, GFP_KERNEL); if (!data) goto out; err = net_assign_generic(net, *ops->id, data); if (err) goto cleanup; } err = 0; if (ops->init) err = ops->init(net); if (!err) return 0; cleanup: kfree(data); out: return err; } static void ops_free(const struct pernet_operations *ops, struct net *net) { if (ops->id && ops->size) { kfree(net_generic(net, *ops->id)); } } static void ops_exit_list(const struct pernet_operations *ops, struct list_head *net_exit_list) { struct net *net; if (ops->exit) { list_for_each_entry(net, net_exit_list, exit_list) ops->exit(net); } if (ops->exit_batch) ops->exit_batch(net_exit_list); } static void ops_free_list(const struct pernet_operations *ops, struct list_head *net_exit_list) { struct net *net; if (ops->size && ops->id) { list_for_each_entry(net, net_exit_list, exit_list) ops_free(ops, net); } } /* should be called with nsid_lock held */ static int alloc_netid(struct net *net, struct net *peer, int reqid) { int min = 0, max = 0; if (reqid >= 0) { min = reqid; max = reqid + 1; } return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC); } /* This function is used by idr_for_each(). If net is equal to peer, the * function returns the id so that idr_for_each() stops. Because we cannot * returns the id 0 (idr_for_each() will not stop), we return the magic value * NET_ID_ZERO (-1) for it. */ #define NET_ID_ZERO -1 static int net_eq_idr(int id, void *net, void *peer) { if (net_eq(net, peer)) return id ? : NET_ID_ZERO; return 0; } /* Should be called with nsid_lock held. If a new id is assigned, the bool alloc * is set to true, thus the caller knows that the new id must be notified via * rtnl. */ static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc) { int id = idr_for_each(&net->netns_ids, net_eq_idr, peer); bool alloc_it = *alloc; *alloc = false; /* Magic value for id 0. */ if (id == NET_ID_ZERO) return 0; if (id > 0) return id; if (alloc_it) { id = alloc_netid(net, peer, -1); *alloc = true; return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED; } return NETNSA_NSID_NOT_ASSIGNED; } /* should be called with nsid_lock held */ static int __peernet2id(struct net *net, struct net *peer) { bool no = false; return __peernet2id_alloc(net, peer, &no); } static void rtnl_net_notifyid(struct net *net, int cmd, int id); /* This function returns the id of a peer netns. If no id is assigned, one will * be allocated and returned. */ int peernet2id_alloc(struct net *net, struct net *peer) { bool alloc; int id; if (atomic_read(&net->count) == 0) return NETNSA_NSID_NOT_ASSIGNED; spin_lock_bh(&net->nsid_lock); alloc = atomic_read(&peer->count) == 0 ? false : true; id = __peernet2id_alloc(net, peer, &alloc); spin_unlock_bh(&net->nsid_lock); if (alloc && id >= 0) rtnl_net_notifyid(net, RTM_NEWNSID, id); return id; } /* This function returns, if assigned, the id of a peer netns. */ int peernet2id(struct net *net, struct net *peer) { int id; spin_lock_bh(&net->nsid_lock); id = __peernet2id(net, peer); spin_unlock_bh(&net->nsid_lock); return id; } EXPORT_SYMBOL(peernet2id); /* This function returns true is the peer netns has an id assigned into the * current netns. 
*/ bool peernet_has_id(struct net *net, struct net *peer) { return peernet2id(net, peer) >= 0; } /* * setup_net runs the initializers for the network namespace object. */ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) { /* Must be called with net_mutex held */ const struct pernet_operations *ops, *saved_ops; int error = 0; LIST_HEAD(net_exit_list); atomic_set(&net->count, 1); atomic_set(&net->passive, 1); net->dev_base_seq = 1; net->user_ns = user_ns; idr_init(&net->netns_ids); spin_lock_init(&net->nsid_lock); list_for_each_entry(ops, &pernet_list, list) { error = ops_init(ops, net); if (error < 0) goto out_undo; } out: return error; out_undo: /* Walk through the list backwards calling the exit functions * for the pernet modules whose init functions did not fail. */ list_add(&net->exit_list, &net_exit_list); saved_ops = ops; list_for_each_entry_continue_reverse(ops, &pernet_list, list) ops_exit_list(ops, &net_exit_list); ops = saved_ops; list_for_each_entry_continue_reverse(ops, &pernet_list, list) ops_free_list(ops, &net_exit_list); rcu_barrier(); goto out; } #ifdef CONFIG_NET_NS static struct ucounts *inc_net_namespaces(struct user_namespace *ns) { return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES); } static void dec_net_namespaces(struct ucounts *ucounts) { dec_ucount(ucounts, UCOUNT_NET_NAMESPACES); } static struct kmem_cache *net_cachep; static struct workqueue_struct *netns_wq; static struct net *net_alloc(void) { struct net *net = NULL; struct net_generic *ng; ng = net_alloc_generic(); if (!ng) goto out; net = kmem_cache_zalloc(net_cachep, GFP_KERNEL); if (!net) goto out_free; rcu_assign_pointer(net->gen, ng); out: return net; out_free: kfree(ng); goto out; } static void net_free(struct net *net) { kfree(rcu_access_pointer(net->gen)); kmem_cache_free(net_cachep, net); } void net_drop_ns(void *p) { struct net *ns = p; if (ns && atomic_dec_and_test(&ns->passive)) net_free(ns); } struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns, struct net *old_net) { struct ucounts *ucounts; struct net *net; int rv; if (!(flags & CLONE_NEWNET)) return get_net(old_net); ucounts = inc_net_namespaces(user_ns); if (!ucounts) return ERR_PTR(-ENOSPC); net = net_alloc(); if (!net) { dec_net_namespaces(ucounts); return ERR_PTR(-ENOMEM); } get_user_ns(user_ns); rv = mutex_lock_killable(&net_mutex); if (rv < 0) { net_free(net); dec_net_namespaces(ucounts); put_user_ns(user_ns); return ERR_PTR(rv); } net->ucounts = ucounts; rv = setup_net(net, user_ns); if (rv == 0) { rtnl_lock(); list_add_tail_rcu(&net->list, &net_namespace_list); rtnl_unlock(); } mutex_unlock(&net_mutex); if (rv < 0) { dec_net_namespaces(ucounts); put_user_ns(user_ns); net_drop_ns(net); return ERR_PTR(rv); } return net; } static DEFINE_SPINLOCK(cleanup_list_lock); static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */ static void cleanup_net(struct work_struct *work) { const struct pernet_operations *ops; struct net *net, *tmp; struct list_head net_kill_list; LIST_HEAD(net_exit_list); /* Atomically snapshot the list of namespaces to cleanup */ spin_lock_irq(&cleanup_list_lock); list_replace_init(&cleanup_list, &net_kill_list); spin_unlock_irq(&cleanup_list_lock); mutex_lock(&net_mutex); /* Don't let anyone else find us. 
*/ rtnl_lock(); list_for_each_entry(net, &net_kill_list, cleanup_list) { list_del_rcu(&net->list); list_add_tail(&net->exit_list, &net_exit_list); for_each_net(tmp) { int id; spin_lock_bh(&tmp->nsid_lock); id = __peernet2id(tmp, net); if (id >= 0) idr_remove(&tmp->netns_ids, id); spin_unlock_bh(&tmp->nsid_lock); if (id >= 0) rtnl_net_notifyid(tmp, RTM_DELNSID, id); } spin_lock_bh(&net->nsid_lock); idr_destroy(&net->netns_ids); spin_unlock_bh(&net->nsid_lock); } rtnl_unlock(); /* * Another CPU might be rcu-iterating the list, wait for it. * This needs to be before calling the exit() notifiers, so * the rcu_barrier() below isn't sufficient alone. */ synchronize_rcu(); /* Run all of the network namespace exit methods */ list_for_each_entry_reverse(ops, &pernet_list, list) ops_exit_list(ops, &net_exit_list); /* Free the net generic variables */ list_for_each_entry_reverse(ops, &pernet_list, list) ops_free_list(ops, &net_exit_list); mutex_unlock(&net_mutex); /* Ensure there are no outstanding rcu callbacks using this * network namespace. */ rcu_barrier(); /* Finally it is safe to free my network namespace structure */ list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) { list_del_init(&net->exit_list); dec_net_namespaces(net->ucounts); put_user_ns(net->user_ns); net_drop_ns(net); } } static DECLARE_WORK(net_cleanup_work, cleanup_net); void __put_net(struct net *net) { /* Cleanup the network namespace in process context */ unsigned long flags; spin_lock_irqsave(&cleanup_list_lock, flags); list_add(&net->cleanup_list, &cleanup_list); spin_unlock_irqrestore(&cleanup_list_lock, flags); queue_work(netns_wq, &net_cleanup_work); } EXPORT_SYMBOL_GPL(__put_net); struct net *get_net_ns_by_fd(int fd) { struct file *file; struct ns_common *ns; struct net *net; file = proc_ns_fget(fd); if (IS_ERR(file)) return ERR_CAST(file); ns = get_proc_ns(file_inode(file)); if (ns->ops == &netns_operations) net = get_net(container_of(ns, struct net, ns)); else net = ERR_PTR(-EINVAL); fput(file); return net; } #else struct net *get_net_ns_by_fd(int fd) { return ERR_PTR(-EINVAL); } #endif EXPORT_SYMBOL_GPL(get_net_ns_by_fd); struct net *get_net_ns_by_pid(pid_t pid) { struct task_struct *tsk; struct net *net; /* Lookup the network namespace */ net = ERR_PTR(-ESRCH); rcu_read_lock(); tsk = find_task_by_vpid(pid); if (tsk) { struct nsproxy *nsproxy; task_lock(tsk); nsproxy = tsk->nsproxy; if (nsproxy) net = get_net(nsproxy->net_ns); task_unlock(tsk); } rcu_read_unlock(); return net; } EXPORT_SYMBOL_GPL(get_net_ns_by_pid); static __net_init int net_ns_net_init(struct net *net) { #ifdef CONFIG_NET_NS net->ns.ops = &netns_operations; #endif return ns_alloc_inum(&net->ns); } static __net_exit void net_ns_net_exit(struct net *net) { ns_free_inum(&net->ns); } static struct pernet_operations __net_initdata net_ns_ops = { .init = net_ns_net_init, .exit = net_ns_net_exit, }; static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = { [NETNSA_NONE] = { .type = NLA_UNSPEC }, [NETNSA_NSID] = { .type = NLA_S32 }, [NETNSA_PID] = { .type = NLA_U32 }, [NETNSA_FD] = { .type = NLA_U32 }, }; static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct nlattr *tb[NETNSA_MAX + 1]; struct net *peer; int nsid, err; err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, rtnl_net_policy); if (err < 0) return err; if (!tb[NETNSA_NSID]) return -EINVAL; nsid = nla_get_s32(tb[NETNSA_NSID]); if (tb[NETNSA_PID]) peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID])); else if 
(tb[NETNSA_FD]) peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD])); else return -EINVAL; if (IS_ERR(peer)) return PTR_ERR(peer); spin_lock_bh(&net->nsid_lock); if (__peernet2id(net, peer) >= 0) { spin_unlock_bh(&net->nsid_lock); err = -EEXIST; goto out; } err = alloc_netid(net, peer, nsid); spin_unlock_bh(&net->nsid_lock); if (err >= 0) { rtnl_net_notifyid(net, RTM_NEWNSID, err); err = 0; } out: put_net(peer); return err; } static int rtnl_net_get_size(void) { return NLMSG_ALIGN(sizeof(struct rtgenmsg)) + nla_total_size(sizeof(s32)) /* NETNSA_NSID */ ; } static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags, int cmd, struct net *net, int nsid) { struct nlmsghdr *nlh; struct rtgenmsg *rth; nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags); if (!nlh) return -EMSGSIZE; rth = nlmsg_data(nlh); rth->rtgen_family = AF_UNSPEC; if (nla_put_s32(skb, NETNSA_NSID, nsid)) goto nla_put_failure; nlmsg_end(skb, nlh); return 0; nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct nlattr *tb[NETNSA_MAX + 1]; struct sk_buff *msg; struct net *peer; int err, id; err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, rtnl_net_policy); if (err < 0) return err; if (tb[NETNSA_PID]) peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID])); else if (tb[NETNSA_FD]) peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD])); else return -EINVAL; if (IS_ERR(peer)) return PTR_ERR(peer); msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL); if (!msg) { err = -ENOMEM; goto out; } id = peernet2id(net, peer); err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, RTM_NEWNSID, net, id); if (err < 0) goto err_out; err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid); goto out; err_out: nlmsg_free(msg); out: put_net(peer); return err; } struct rtnl_net_dump_cb { struct net *net; struct sk_buff *skb; struct netlink_callback *cb; int idx; int s_idx; }; static int rtnl_net_dumpid_one(int id, void *peer, void *data) { struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data; int ret; if (net_cb->idx < net_cb->s_idx) goto cont; ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid, net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWNSID, net_cb->net, id); if (ret < 0) return ret; cont: net_cb->idx++; return 0; } static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct rtnl_net_dump_cb net_cb = { .net = net, .skb = skb, .cb = cb, .idx = 0, .s_idx = cb->args[0], }; spin_lock_bh(&net->nsid_lock); idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb); spin_unlock_bh(&net->nsid_lock); cb->args[0] = net_cb.idx; return skb->len; } static void rtnl_net_notifyid(struct net *net, int cmd, int id) { struct sk_buff *msg; int err = -ENOMEM; msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL); if (!msg) goto out; err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id); if (err < 0) goto err_out; rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0); return; err_out: nlmsg_free(msg); out: rtnl_set_sk_err(net, RTNLGRP_NSID, err); } static int __init net_ns_init(void) { struct net_generic *ng; #ifdef CONFIG_NET_NS net_cachep = kmem_cache_create("net_namespace", sizeof(struct net), SMP_CACHE_BYTES, SLAB_PANIC, NULL); /* Create workqueue for cleanup */ netns_wq = create_singlethread_workqueue("netns"); if (!netns_wq) panic("Could not create netns workq"); #endif ng = net_alloc_generic(); if (!ng) panic("Could not 
allocate generic netns"); rcu_assign_pointer(init_net.gen, ng); mutex_lock(&net_mutex); if (setup_net(&init_net, &init_user_ns)) panic("Could not setup the initial network namespace"); init_net_initialized = true; rtnl_lock(); list_add_tail_rcu(&init_net.list, &net_namespace_list); rtnl_unlock(); mutex_unlock(&net_mutex); register_pernet_subsys(&net_ns_ops); rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid, NULL); return 0; } pure_initcall(net_ns_init); #ifdef CONFIG_NET_NS static int __register_pernet_operations(struct list_head *list, struct pernet_operations *ops) { struct net *net; int error; LIST_HEAD(net_exit_list); list_add_tail(&ops->list, list); if (ops->init || (ops->id && ops->size)) { for_each_net(net) { error = ops_init(ops, net); if (error) goto out_undo; list_add_tail(&net->exit_list, &net_exit_list); } } return 0; out_undo: /* If I have an error clean up all namespaces I initialized */ list_del(&ops->list); ops_exit_list(ops, &net_exit_list); ops_free_list(ops, &net_exit_list); return error; } static void __unregister_pernet_operations(struct pernet_operations *ops) { struct net *net; LIST_HEAD(net_exit_list); list_del(&ops->list); for_each_net(net) list_add_tail(&net->exit_list, &net_exit_list); ops_exit_list(ops, &net_exit_list); ops_free_list(ops, &net_exit_list); } #else static int __register_pernet_operations(struct list_head *list, struct pernet_operations *ops) { if (!init_net_initialized) { list_add_tail(&ops->list, list); return 0; } return ops_init(ops, &init_net); } static void __unregister_pernet_operations(struct pernet_operations *ops) { if (!init_net_initialized) { list_del(&ops->list); } else { LIST_HEAD(net_exit_list); list_add(&init_net.exit_list, &net_exit_list); ops_exit_list(ops, &net_exit_list); ops_free_list(ops, &net_exit_list); } } #endif /* CONFIG_NET_NS */ static DEFINE_IDA(net_generic_ids); static int register_pernet_operations(struct list_head *list, struct pernet_operations *ops) { int error; if (ops->id) { again: error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID, ops->id); if (error < 0) { if (error == -EAGAIN) { ida_pre_get(&net_generic_ids, GFP_KERNEL); goto again; } return error; } max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1); } error = __register_pernet_operations(list, ops); if (error) { rcu_barrier(); if (ops->id) ida_remove(&net_generic_ids, *ops->id); } return error; } static void unregister_pernet_operations(struct pernet_operations *ops) { __unregister_pernet_operations(ops); rcu_barrier(); if (ops->id) ida_remove(&net_generic_ids, *ops->id); } /** * register_pernet_subsys - register a network namespace subsystem * @ops: pernet operations structure for the subsystem * * Register a subsystem which has init and exit functions * that are called when network namespaces are created and * destroyed respectively. * * When registered, all network namespace init functions are * called for every existing network namespace, allowing kernel * modules to have a race-free view of the set of network namespaces. * * When a new network namespace is created all of the init * methods are called in the order in which they were registered. * * When a network namespace is destroyed all of the exit methods * are called in the reverse of the order in which they were * registered.
*/ int register_pernet_subsys(struct pernet_operations *ops) { int error; mutex_lock(&net_mutex); error = register_pernet_operations(first_device, ops); mutex_unlock(&net_mutex); return error; } EXPORT_SYMBOL_GPL(register_pernet_subsys); /** * unregister_pernet_subsys - unregister a network namespace subsystem * @ops: pernet operations structure to manipulate * * Remove the pernet operations structure from the list to be * used when network namespaces are created or destroyed. In * addition run the exit method for all existing network * namespaces. */ void unregister_pernet_subsys(struct pernet_operations *ops) { mutex_lock(&net_mutex); unregister_pernet_operations(ops); mutex_unlock(&net_mutex); } EXPORT_SYMBOL_GPL(unregister_pernet_subsys); /** * register_pernet_device - register a network namespace device * @ops: pernet operations structure for the subsystem * * Register a device which has init and exit functions * that are called when network namespaces are created and * destroyed respectively. * * When registered, all network namespace init functions are * called for every existing network namespace, allowing kernel * modules to have a race-free view of the set of network namespaces. * * When a new network namespace is created all of the init * methods are called in the order in which they were registered. * * When a network namespace is destroyed all of the exit methods * are called in the reverse of the order in which they were * registered. */ int register_pernet_device(struct pernet_operations *ops) { int error; mutex_lock(&net_mutex); error = register_pernet_operations(&pernet_list, ops); if (!error && (first_device == &pernet_list)) first_device = &ops->list; mutex_unlock(&net_mutex); return error; } EXPORT_SYMBOL_GPL(register_pernet_device); /** * unregister_pernet_device - unregister a network namespace netdevice * @ops: pernet operations structure to manipulate * * Remove the pernet operations structure from the list to be * used when network namespaces are created or destroyed. In * addition run the exit method for all existing network * namespaces. */ void unregister_pernet_device(struct pernet_operations *ops) { mutex_lock(&net_mutex); if (&ops->list == first_device) first_device = first_device->next; unregister_pernet_operations(ops); mutex_unlock(&net_mutex); } EXPORT_SYMBOL_GPL(unregister_pernet_device); #ifdef CONFIG_NET_NS static struct ns_common *netns_get(struct task_struct *task) { struct net *net = NULL; struct nsproxy *nsproxy; task_lock(task); nsproxy = task->nsproxy; if (nsproxy) net = get_net(nsproxy->net_ns); task_unlock(task); return net ? &net->ns : NULL; } static inline struct net *to_net_ns(struct ns_common *ns) { return container_of(ns, struct net, ns); } static void netns_put(struct ns_common *ns) { put_net(to_net_ns(ns)); } static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns) { struct net *net = to_net_ns(ns); if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) || !ns_capable(current_user_ns(), CAP_SYS_ADMIN)) return -EPERM; put_net(nsproxy->net_ns); nsproxy->net_ns = get_net(net); return 0; } static struct user_namespace *netns_owner(struct ns_common *ns) { return to_net_ns(ns)->user_ns; } const struct proc_ns_operations netns_operations = { .name = "net", .type = CLONE_NEWNET, .get = netns_get, .put = netns_put, .install = netns_install, .owner = netns_owner, }; #endif
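/*
 * Editor's illustrative sketch, not part of the original file: the kerneldoc
 * above describes how a pernet_operations with an id/size pair gets a
 * per-namespace data area allocated by ops_init() and looked up through
 * net_generic(). A minimal subsystem could look like the following; the
 * demo_* names are hypothetical, and the exact type of the id field
 * (unsigned int here) is an assumption for this kernel version.
 */
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct demo_pernet {
	unsigned long event_count;		/* per-namespace counter */
};

static unsigned int demo_net_id;		/* filled in by registration */

static int __net_init demo_net_init(struct net *net)
{
	/* ops_init() already kzalloc'ed ops->size bytes for this netns */
	struct demo_pernet *dp = net_generic(net, demo_net_id);

	dp->event_count = 0;
	return 0;
}

static void __net_exit demo_net_exit(struct net *net)
{
	/* the generic area itself is freed by ops_free(); nothing to do */
}

static struct pernet_operations demo_net_ops = {
	.init = demo_net_init,
	.exit = demo_net_exit,
	.id   = &demo_net_id,
	.size = sizeof(struct demo_pernet),
};

static int __init demo_init(void)
{
	/* runs demo_net_init() for every namespace that already exists */
	return register_pernet_subsys(&demo_net_ops);
}

static void __exit demo_exit(void)
{
	/* runs demo_net_exit() for every remaining namespace */
	unregister_pernet_subsys(&demo_net_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");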
/* ==== sample 175976: linux @ e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | split train_val | target 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 ==== */
/* * apb.h: Advanced PCI Bridge Configuration Registers and Bits * * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) */ #ifndef _SPARC64_APB_H #define _SPARC64_APB_H #define APB_TICK_REGISTER 0xb0 #define APB_INT_ACK 0xb8 #define APB_PRIMARY_MASTER_RETRY_LIMIT 0xc0 #define APB_DMA_ASFR 0xc8 #define APB_DMA_AFAR 0xd0 #define APB_PIO_TARGET_RETRY_LIMIT 0xd8 #define APB_PIO_TARGET_LATENCY_TIMER 0xd9 #define APB_DMA_TARGET_RETRY_LIMIT 0xda #define APB_DMA_TARGET_LATENCY_TIMER 0xdb #define APB_SECONDARY_MASTER_RETRY_LIMIT 0xdc #define APB_SECONDARY_CONTROL 0xdd #define APB_IO_ADDRESS_MAP 0xde #define APB_MEM_ADDRESS_MAP 0xdf #define APB_PCI_CONTROL_LOW 0xe0 # define APB_PCI_CTL_LOW_ARB_PARK (1 << 21) # define APB_PCI_CTL_LOW_ERRINT_EN (1 << 8) #define APB_PCI_CONTROL_HIGH 0xe4 # define APB_PCI_CTL_HIGH_SERR (1 << 2) # define APB_PCI_CTL_HIGH_ARBITER_EN (1 << 0) #define APB_PIO_ASFR 0xe8 #define APB_PIO_AFAR 0xf0 #define APB_DIAG_REGISTER 0xf8 #endif /* !(_SPARC64_APB_H) */
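/*
 * Editor's illustrative sketch, not in the original header: these offsets
 * appear to live in the bridge's PCI configuration space (an assumption),
 * so a driver would combine the bit masks and write them back with the PCI
 * config accessors. The function below is hypothetical.
 */
#include <linux/pci.h>
#include <asm/apb.h>

static void apb_example_enable_arbiter(struct pci_dev *pdev)
{
	u32 ctl;

	/* park the arbiter and enable error interrupts */
	pci_read_config_dword(pdev, APB_PCI_CONTROL_LOW, &ctl);
	ctl |= APB_PCI_CTL_LOW_ARB_PARK | APB_PCI_CTL_LOW_ERRINT_EN;
	pci_write_config_dword(pdev, APB_PCI_CONTROL_LOW, ctl);

	/* turn on the secondary-bus arbiter and SERR forwarding */
	pci_read_config_dword(pdev, APB_PCI_CONTROL_HIGH, &ctl);
	ctl |= APB_PCI_CTL_HIGH_ARBITER_EN | APB_PCI_CTL_HIGH_SERR;
	pci_write_config_dword(pdev, APB_PCI_CONTROL_HIGH, ctl);
}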
/* ==== sample 27567: Chrome @ 796a0e014bc3985709c0a35538d606ef1da31e1b | split train_val | target 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 ==== */
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the Chromium LICENSE file. #ifndef TESTS_TIMING_H #define TESTS_TIMING_H #include <assert.h> #if defined(_WIN32) #include <windows.h> #else #include <sys/time.h> #endif #include <time.h> #if defined(_WIN32) static double seconds() { static double clock_frequency; static bool have_frequency; LARGE_INTEGER qpc; QueryPerformanceCounter(&qpc); if (have_frequency) return qpc.QuadPart * clock_frequency; have_frequency = true; QueryPerformanceFrequency(&qpc); clock_frequency = 1.0 / (double) qpc.QuadPart; return seconds(); } #else static double seconds() { struct timeval now; gettimeofday(&now, 0); return now.tv_sec + now.tv_usec * (1.0 / 1000000.0); } #endif #define TIME(function, time) do { \ double start = seconds(); \ (function); \ *time += seconds() - start; \ } while (0) #endif // TESTS_TIMING_H
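// Editor's illustrative sketch, not part of the header: the TIME() macro
// accumulates elapsed wall-clock seconds into the double pointed to by its
// second argument, so repeated calls total several runs. busy_work() is a
// hypothetical workload, and the header is assumed to be saved as timing.h.
#include <stdio.h>
#include "timing.h"

static void busy_work(void) {
  volatile unsigned long sink = 0;
  for (unsigned long i = 0; i < 10000000UL; ++i)
    sink += i;
}

int main(void) {
  double elapsed = 0.0;
  // Time three runs; each call adds its own duration to |elapsed|.
  for (int run = 0; run < 3; ++run)
    TIME(busy_work(), &elapsed);
  printf("3 runs took %.6f seconds total\n", elapsed);
  return 0;
}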
/* ==== sample 56802: Chrome @ 796a0e014bc3985709c0a35538d606ef1da31e1b | split train_val | target 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 ==== */
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_CHROMEOS_SMB_CLIENT_DISCOVERY_MDNS_HOST_LOCATOR_H_ #define CHROME_BROWSER_CHROMEOS_SMB_CLIENT_DISCOVERY_MDNS_HOST_LOCATOR_H_ #include <map> #include <memory> #include <string> #include "base/macros.h" #include "base/memory/weak_ptr.h" #include "chrome/browser/chromeos/smb_client/discovery/host_locator.h" #include "net/dns/mdns_client.h" namespace chromeos { namespace smb_client { // Removes .local from |raw_hostname| if located at the end of the string and // returns the new hostname. Hostname RemoveLocal(const std::string& raw_hostname); // HostLocator implementation that uses mDns to locate hosts. class MDnsHostLocator : public HostLocator, public base::SupportsWeakPtr<MDnsHostLocator> { public: MDnsHostLocator(); ~MDnsHostLocator() override; // HostLocator override. void FindHosts(FindHostsCallback callback) override; private: // Makes the MDnsClient start listening on port 5353 on each network // interface. bool StartListening(); // Creates a PTR transaction and finds all SMB services in the network. bool CreatePtrTransaction(); // Creates an SRV transaction, which returns the hostname of |service|. void CreateSrvTransaction(const std::string& service); // Creates an A transaction, which returns the address of |raw_hostname|. void CreateATransaction(const std::string& raw_hostname); // Handler for the PTR transaction request. Returns true if the transaction // successfully starts. void OnPtrTransactionResponse(net::MDnsTransaction::Result result, const net::RecordParsed* record); // Handler for the SRV transaction request. void OnSrvTransactionResponse(net::MDnsTransaction::Result result, const net::RecordParsed* record); // Handler for the A transaction request. void OnATransactionResponse(const std::string& raw_hostname, net::MDnsTransaction::Result result, const net::RecordParsed* record); // Resolves services that were found through a PTR transaction request. If // there are no more services to be processed, this will call the // FindHostsCallback with the hosts found. void ResolveServicesFound(); // Fires the callback if there are no more transactions left. void FireCallbackIfFinished(); // Fires the callback immediately. If |success| is true, return with the hosts // that were found. void FireCallback(bool success); // Resets the state of the MDnsClient and resets all members to default. void Reset(); // Returns the handler for the PTR transaction response. net::MDnsTransaction::ResultCallback GetPtrTransactionHandler(); // Returns the handler for the SRV transaction response. net::MDnsTransaction::ResultCallback GetSrvTransactionHandler(); // Returns the handler for the A transaction response. net::MDnsTransaction::ResultCallback GetATransactionHandler( const std::string& raw_hostname); bool running_ = false; uint32_t remaining_transactions_ = 0; FindHostsCallback callback_; std::vector<std::string> services_; HostMap results_; std::vector<std::unique_ptr<net::MDnsTransaction>> transactions_; std::unique_ptr<net::MDnsClient> mdns_client_; std::unique_ptr<net::MDnsSocketFactory> socket_factory_; DISALLOW_COPY_AND_ASSIGN(MDnsHostLocator); }; } // namespace smb_client } // namespace chromeos #endif // CHROME_BROWSER_CHROMEOS_SMB_CLIENT_DISCOVERY_MDNS_HOST_LOCATOR_H_
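// Editor's illustrative sketch, not part of the header: RemoveLocal() is
// declared to strip a trailing ".local" label before the hostname goes to
// the A-record lookup. The real code operates on std::string; a plain-C
// rendering of the same idea might look like this.
#include <stdlib.h>
#include <string.h>

// Returns a newly malloc'ed copy of |raw_hostname| with a trailing ".local"
// removed when present; the caller owns the returned buffer.
static char* remove_local(const char* raw_hostname) {
  static const char kSuffix[] = ".local";
  size_t len = strlen(raw_hostname);
  size_t suffix_len = sizeof(kSuffix) - 1;
  size_t keep = len;
  if (len >= suffix_len &&
      strcmp(raw_hostname + len - suffix_len, kSuffix) == 0) {
    keep = len - suffix_len;  // drop the ".local" label
  }
  char* out = malloc(keep + 1);
  if (out) {
    memcpy(out, raw_hostname, keep);
    out[keep] = '\0';
  }
  return out;
}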
/* ==== sample 30437: Chrome @ 796a0e014bc3985709c0a35538d606ef1da31e1b | split train_val | target 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 ==== */
/* * Copyright (C) 2009 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef THIRD_PARTY_BLINK_PUBLIC_PLATFORM_WEB_IMAGE_H_ #define THIRD_PARTY_BLINK_PUBLIC_PLATFORM_WEB_IMAGE_H_ #include "third_party/blink/public/platform/web_common.h" #include "third_party/blink/public/platform/web_vector.h" #include "base/time/time.h" #include "third_party/skia/include/core/SkBitmap.h" #if INSIDE_BLINK #include "base/memory/scoped_refptr.h" #include "third_party/blink/renderer/platform/graphics/image_orientation.h" #endif namespace blink { class Image; class WebData; struct WebSize; // A container for an ARGB bitmap. class WebImage { public: // An image with a duration associated. An animation is a sequence of // AnimationFrames played in succession. struct AnimationFrame { SkBitmap bitmap; base::TimeDelta duration; }; ~WebImage() { Reset(); } WebImage() { Init(); } WebImage(const WebImage& image) { Init(); Assign(image); } WebImage& operator=(const WebImage& image) { Assign(image); return *this; } // Decodes the given image data. If the image has multiple frames, // then the frame whose size is desired_size is returned. Otherwise, // the first frame is returned. BLINK_PLATFORM_EXPORT static WebImage FromData(const WebData&, const WebSize& desired_size); // Returns a list of all frames in the image. Only the first frame at each // pixel size will be returned. BLINK_PLATFORM_EXPORT static WebVector<WebImage> FramesFromData( const WebData&); // Returns a list of all animation frames in the image. 
BLINK_PLATFORM_EXPORT static WebVector<AnimationFrame> AnimationFromData( const WebData&); BLINK_PLATFORM_EXPORT void Reset(); BLINK_PLATFORM_EXPORT void Assign(const WebImage&); BLINK_PLATFORM_EXPORT bool IsNull() const; BLINK_PLATFORM_EXPORT WebSize Size() const; #if INSIDE_BLINK BLINK_PLATFORM_EXPORT WebImage(scoped_refptr<Image>, RespectImageOrientationEnum = kDoNotRespectImageOrientation); #endif WebImage(const SkBitmap& bitmap) : bitmap_(bitmap) {} WebImage& operator=(const SkBitmap& bitmap) { bitmap_ = bitmap; return *this; } SkBitmap& GetSkBitmap() { return bitmap_; } const SkBitmap& GetSkBitmap() const { return bitmap_; } private: void Init() {} SkBitmap bitmap_; }; } // namespace blink #endif
/* ==== sample 51805: Chrome @ 796a0e014bc3985709c0a35538d606ef1da31e1b | split train_val | target 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 ==== */
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef MEDIA_MOJO_SERVICES_MOJO_PROVISION_FETCHER_H_ #define MEDIA_MOJO_SERVICES_MOJO_PROVISION_FETCHER_H_ #include "base/compiler_specific.h" #include "base/macros.h" #include "base/memory/weak_ptr.h" #include "media/base/provision_fetcher.h" #include "media/mojo/interfaces/provision_fetcher.mojom.h" #include "media/mojo/services/media_mojo_export.h" namespace media { // A ProvisionFetcher that proxies to a mojom::ProvisionFetcherPtr. class MEDIA_MOJO_EXPORT MojoProvisionFetcher : public ProvisionFetcher { public: explicit MojoProvisionFetcher( mojom::ProvisionFetcherPtr provision_fetcher_ptr); ~MojoProvisionFetcher() final; // ProvisionFetcher implementation: void Retrieve(const std::string& default_url, const std::string& request_data, const ResponseCB& response_cb) final; private: // Callback for mojom::ProvisionFetcherPtr::Retrieve(). void OnResponse(const ResponseCB& response_cb, bool success, const std::string& response); mojom::ProvisionFetcherPtr provision_fetcher_ptr_; base::WeakPtrFactory<MojoProvisionFetcher> weak_factory_; DISALLOW_COPY_AND_ASSIGN(MojoProvisionFetcher); }; } // namespace media #endif // MEDIA_MOJO_SERVICES_MOJO_PROVISION_FETCHER_H_
/* ==== sample 190425: linux @ e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | split train_val | target 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 ==== */
/* * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "edp.h" #include "edp.xml.h" #define EDP_MAX_LANE 4 struct edp_phy { void __iomem *base; }; bool msm_edp_phy_ready(struct edp_phy *phy) { u32 status; int cnt = 100; while (--cnt) { status = edp_read(phy->base + REG_EDP_PHY_GLB_PHY_STATUS); if (status & 0x01) break; usleep_range(500, 1000); } if (cnt == 0) { pr_err("%s: PHY NOT ready\n", __func__); return false; } else { return true; } } void msm_edp_phy_ctrl(struct edp_phy *phy, int enable) { DBG("enable=%d", enable); if (enable) { /* Reset */ edp_write(phy->base + REG_EDP_PHY_CTRL, EDP_PHY_CTRL_SW_RESET | EDP_PHY_CTRL_SW_RESET_PLL); /* Make sure fully reset */ wmb(); usleep_range(500, 1000); edp_write(phy->base + REG_EDP_PHY_CTRL, 0x000); edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0x3f); edp_write(phy->base + REG_EDP_PHY_GLB_CFG, 0x1); } else { edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0xc0); } } /* voltage mode and pre emphasis cfg */ void msm_edp_phy_vm_pe_init(struct edp_phy *phy) { edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, 0x3); edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, 0x64); edp_write(phy->base + REG_EDP_PHY_GLB_MISC9, 0x6c); } void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1) { edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, v0); edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, v1); } void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane) { u32 i; u32 data; if (up) data = 0; /* power up */ else data = 0x7; /* power down */ for (i = 0; i < max_lane; i++) edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i) , data); /* power down unused lane */ data = 0x7; /* power down */ for (i = max_lane; i < EDP_MAX_LANE; i++) edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i) , data); } void *msm_edp_phy_init(struct device *dev, void __iomem *regbase) { struct edp_phy *phy = NULL; phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); if (!phy) return NULL; phy->base = regbase; return phy; }
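/*
 * Editor's illustrative sketch, not part of the original file: a plausible
 * bring-up sequence using only the helpers defined above. The caller, lane
 * count and call ordering are assumptions; the real driver sequences these
 * calls from its controller code.
 */
static int edp_phy_example_bringup(struct device *dev, void __iomem *regbase,
				   u32 num_lanes)
{
	struct edp_phy *phy = msm_edp_phy_init(dev, regbase);

	if (!phy)
		return -ENOMEM;

	msm_edp_phy_ctrl(phy, 1);		/* reset, then power up */
	msm_edp_phy_vm_pe_init(phy);		/* default voltage/pre-emphasis */
	msm_edp_phy_lane_power_ctrl(phy, true, num_lanes);

	if (!msm_edp_phy_ready(phy))		/* polls PHY status, ~100 tries */
		return -ETIMEDOUT;

	return 0;
}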
/* ==== sample 205011: linux @ e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | split train_val | target 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 ==== */
#undef TRACE_SYSTEM #define TRACE_SYSTEM rcu #if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_RCU_H #include <linux/tracepoint.h> /* * Tracepoint for start/end markers used for utilization calculations. * By convention, the string is of the following forms: * * "Start <activity>" -- Mark the start of the specified activity, * such as "context switch". Nesting is permitted. * "End <activity>" -- Mark the end of the specified activity. * * An "@" character within "<activity>" is a comment character: Data * reduction scripts will ignore the "@" and the remainder of the line. */ TRACE_EVENT(rcu_utilization, TP_PROTO(const char *s), TP_ARGS(s), TP_STRUCT__entry( __field(const char *, s) ), TP_fast_assign( __entry->s = s; ), TP_printk("%s", __entry->s) ); #ifdef CONFIG_RCU_TRACE #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) /* * Tracepoint for grace-period events. Takes a string identifying the * RCU flavor, the grace-period number, and a string identifying the * grace-period-related event as follows: * * "AccReadyCB": CPU accelerates new callbacks to RCU_NEXT_READY_TAIL. * "AccWaitCB": CPU accelerates new callbacks to RCU_WAIT_TAIL. * "newreq": Request a new grace period. * "start": Start a grace period. * "cpustart": CPU first notices a grace-period start. * "cpuqs": CPU passes through a quiescent state. * "cpuonl": CPU comes online. * "cpuofl": CPU goes offline. * "reqwait": GP kthread sleeps waiting for grace-period request. * "reqwaitsig": GP kthread awakened by signal from reqwait state. * "fqswait": GP kthread waiting until time to force quiescent states. * "fqsstart": GP kthread starts forcing quiescent states. * "fqsend": GP kthread done forcing quiescent states. * "fqswaitsig": GP kthread awakened by signal from fqswait state. * "end": End a grace period. * "cpuend": CPU first notices a grace-period end. */ TRACE_EVENT(rcu_grace_period, TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent), TP_ARGS(rcuname, gpnum, gpevent), TP_STRUCT__entry( __field(const char *, rcuname) __field(unsigned long, gpnum) __field(const char *, gpevent) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->gpnum = gpnum; __entry->gpevent = gpevent; ), TP_printk("%s %lu %s", __entry->rcuname, __entry->gpnum, __entry->gpevent) ); /* * Tracepoint for future grace-period events, including those for no-callbacks * CPUs. The caller should pull the data from the rcu_node structure, * other than rcuname, which comes from the rcu_state structure, and event, * which is one of the following: * * "Startleaf": Request a nocb grace period based on leaf-node data. * "Startedleaf": Leaf-node start proved sufficient. * "Startedleafroot": Leaf-node start proved sufficient after checking root. * "Startedroot": Requested a nocb grace period based on root-node data. * "StartWait": Start waiting for the requested grace period. * "ResumeWait": Resume waiting after signal. * "EndWait": Complete wait. * "Cleanup": Clean up rcu_node structure after previous GP. * "CleanupMore": Clean up, and another no-CB GP is needed.
*/ TRACE_EVENT(rcu_future_grace_period, TP_PROTO(const char *rcuname, unsigned long gpnum, unsigned long completed, unsigned long c, u8 level, int grplo, int grphi, const char *gpevent), TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent), TP_STRUCT__entry( __field(const char *, rcuname) __field(unsigned long, gpnum) __field(unsigned long, completed) __field(unsigned long, c) __field(u8, level) __field(int, grplo) __field(int, grphi) __field(const char *, gpevent) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->gpnum = gpnum; __entry->completed = completed; __entry->c = c; __entry->level = level; __entry->grplo = grplo; __entry->grphi = grphi; __entry->gpevent = gpevent; ), TP_printk("%s %lu %lu %lu %u %d %d %s", __entry->rcuname, __entry->gpnum, __entry->completed, __entry->c, __entry->level, __entry->grplo, __entry->grphi, __entry->gpevent) ); /* * Tracepoint for grace-period-initialization events. These are * distinguished by the type of RCU, the new grace-period number, the * rcu_node structure level, the starting and ending CPU covered by the * rcu_node structure, and the mask of CPUs that will be waited for. * All but the type of RCU are extracted from the rcu_node structure. */ TRACE_EVENT(rcu_grace_period_init, TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level, int grplo, int grphi, unsigned long qsmask), TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask), TP_STRUCT__entry( __field(const char *, rcuname) __field(unsigned long, gpnum) __field(u8, level) __field(int, grplo) __field(int, grphi) __field(unsigned long, qsmask) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->gpnum = gpnum; __entry->level = level; __entry->grplo = grplo; __entry->grphi = grphi; __entry->qsmask = qsmask; ), TP_printk("%s %lu %u %d %d %lx", __entry->rcuname, __entry->gpnum, __entry->level, __entry->grplo, __entry->grphi, __entry->qsmask) ); /* * Tracepoint for expedited grace-period events. Takes a string identifying * the RCU flavor, the expedited grace-period sequence number, and a string * identifying the grace-period-related event as follows: * * "snap": Captured snapshot of expedited grace period sequence number. * "start": Started a real expedited grace period. * "end": Ended a real expedited grace period. * "endwake": Woke piggybackers up. * "done": Someone else did the expedited grace period for us. */ TRACE_EVENT(rcu_exp_grace_period, TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent), TP_ARGS(rcuname, gpseq, gpevent), TP_STRUCT__entry( __field(const char *, rcuname) __field(unsigned long, gpseq) __field(const char *, gpevent) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->gpseq = gpseq; __entry->gpevent = gpevent; ), TP_printk("%s %lu %s", __entry->rcuname, __entry->gpseq, __entry->gpevent) ); /* * Tracepoint for expedited grace-period funnel-locking events. Takes a * string identifying the RCU flavor, an integer identifying the rcu_node * combining-tree level, another pair of integers identifying the lowest- * and highest-numbered CPU associated with the current rcu_node structure, * and a string
identifying the grace-period-related event as follows: * * "nxtlvl": Advance to next level of rcu_node funnel * "wait": Wait for someone else to do expedited GP */ TRACE_EVENT(rcu_exp_funnel_lock, TP_PROTO(const char *rcuname, u8 level, int grplo, int grphi, const char *gpevent), TP_ARGS(rcuname, level, grplo, grphi, gpevent), TP_STRUCT__entry( __field(const char *, rcuname) __field(u8, level) __field(int, grplo) __field(int, grphi) __field(const char *, gpevent) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->level = level; __entry->grplo = grplo; __entry->grphi = grphi; __entry->gpevent = gpevent; ), TP_printk("%s %d %d %d %s", __entry->rcuname, __entry->level, __entry->grplo, __entry->grphi, __entry->gpevent) ); /* * Tracepoint for RCU no-CBs CPU callback handoffs. This event is intended * to assist debugging of these handoffs. * * The first argument is the name of the RCU flavor, and the second is * the number of the offloaded CPU. The third and final * argument is a string as follows: * * "WakeEmpty": Wake rcuo kthread, first CB to empty list. * "WakeEmptyIsDeferred": Wake rcuo kthread later, first CB to empty list. * "WakeOvf": Wake rcuo kthread, CB list is huge. * "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge. * "WakeNot": Don't wake rcuo kthread. * "WakeNotPoll": Don't wake rcuo kthread because it is polling. * "DeferredWake": Carried out the "IsDeferred" wakeup. * "Poll": Start of new polling cycle for rcu_nocb_poll. * "Sleep": Sleep waiting for CBs for !rcu_nocb_poll. * "WokeEmpty": rcuo kthread woke to find empty list. * "WokeNonEmpty": rcuo kthread woke to find non-empty list. * "WaitQueue": Enqueue partially done, timed wait for it to complete. * "WokeQueue": Partial enqueue now complete. */ TRACE_EVENT(rcu_nocb_wake, TP_PROTO(const char *rcuname, int cpu, const char *reason), TP_ARGS(rcuname, cpu, reason), TP_STRUCT__entry( __field(const char *, rcuname) __field(int, cpu) __field(const char *, reason) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->cpu = cpu; __entry->reason = reason; ), TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason) ); /* * Tracepoint for tasks blocking within preemptible-RCU read-side * critical sections. Track the type of RCU (which one day might * include SRCU), the grace-period number that the task is blocking * (the current or the next), and the task's PID. */ TRACE_EVENT(rcu_preempt_task, TP_PROTO(const char *rcuname, int pid, unsigned long gpnum), TP_ARGS(rcuname, pid, gpnum), TP_STRUCT__entry( __field(const char *, rcuname) __field(unsigned long, gpnum) __field(int, pid) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->gpnum = gpnum; __entry->pid = pid; ), TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid) ); /* * Tracepoint for tasks that blocked within a given preemptible-RCU * read-side critical section exiting that critical section. Track the * type of RCU (which one day might include SRCU) and the task's PID. */ TRACE_EVENT(rcu_unlock_preempted_task, TP_PROTO(const char *rcuname, unsigned long gpnum, int pid), TP_ARGS(rcuname, gpnum, pid), TP_STRUCT__entry( __field(const char *, rcuname) __field(unsigned long, gpnum) __field(int, pid) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->gpnum = gpnum; __entry->pid = pid; ), TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid) ); /* * Tracepoint for quiescent-state-reporting events.
These are * distinguished by the type of RCU, the grace-period number, the * mask of quiescent lower-level entities, the rcu_node structure level, * the starting and ending CPU covered by the rcu_node structure, and * whether there are any blocked tasks blocking the current grace period. * All but the type of RCU are extracted from the rcu_node structure. */ TRACE_EVENT(rcu_quiescent_state_report, TP_PROTO(const char *rcuname, unsigned long gpnum, unsigned long mask, unsigned long qsmask, u8 level, int grplo, int grphi, int gp_tasks), TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks), TP_STRUCT__entry( __field(const char *, rcuname) __field(unsigned long, gpnum) __field(unsigned long, mask) __field(unsigned long, qsmask) __field(u8, level) __field(int, grplo) __field(int, grphi) __field(u8, gp_tasks) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->gpnum = gpnum; __entry->mask = mask; __entry->qsmask = qsmask; __entry->level = level; __entry->grplo = grplo; __entry->grphi = grphi; __entry->gp_tasks = gp_tasks; ), TP_printk("%s %lu %lx>%lx %u %d %d %u", __entry->rcuname, __entry->gpnum, __entry->mask, __entry->qsmask, __entry->level, __entry->grplo, __entry->grphi, __entry->gp_tasks) ); /* * Tracepoint for quiescent states detected by force_quiescent_state(). * These trace events include the type of RCU, the grace-period number that * was blocked by the CPU, the CPU itself, and the type of quiescent state, * which can be "dti" for dyntick-idle mode, "ofl" for CPU offline, "kick" * when kicking a CPU that has been in dyntick-idle mode for too long, or * "rqc" if the CPU got a quiescent state via its rcu_qs_ctr. */ TRACE_EVENT(rcu_fqs, TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent), TP_ARGS(rcuname, gpnum, cpu, qsevent), TP_STRUCT__entry( __field(const char *, rcuname) __field(unsigned long, gpnum) __field(int, cpu) __field(const char *, qsevent) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->gpnum = gpnum; __entry->cpu = cpu; __entry->qsevent = qsevent; ), TP_printk("%s %lu %d %s", __entry->rcuname, __entry->gpnum, __entry->cpu, __entry->qsevent) ); #endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) */ /* * Tracepoint for dyntick-idle entry/exit events. These take a string * as argument: "Start" for entering dyntick-idle mode, "End" for * leaving it, "--=" for events moving towards idle, and "++=" for events * moving away from idle. "Error on entry: not idle task" and "Error on * exit: not idle task" indicate that a non-idle task is erroneously * toying with the idle loop. * * These events also take a pair of numbers, which indicate the nesting * depth before and after the event of interest. Note that task-related * events use the upper bits of each number, while interrupt-related * events use the lower bits. */ TRACE_EVENT(rcu_dyntick, TP_PROTO(const char *polarity, long long oldnesting, long long newnesting), TP_ARGS(polarity, oldnesting, newnesting), TP_STRUCT__entry( __field(const char *, polarity) __field(long long, oldnesting) __field(long long, newnesting) ), TP_fast_assign( __entry->polarity = polarity; __entry->oldnesting = oldnesting; __entry->newnesting = newnesting; ), TP_printk("%s %llx %llx", __entry->polarity, __entry->oldnesting, __entry->newnesting) ); /* * Tracepoint for RCU preparation for idle, the goal being to get RCU * processing done so that the current CPU can shut off its scheduling * clock and enter dyntick-idle mode. 
One way to accomplish this is * to drain all RCU callbacks from this CPU, and the other is to have * done everything RCU requires for the current grace period. In this * latter case, the CPU will be awakened at the end of the current grace * period in order to process the remainder of its callbacks. * * These tracepoints take a string as argument: * * "No callbacks": Nothing to do, no callbacks on this CPU. * "In holdoff": Nothing to do, holding off after unsuccessful attempt. * "Begin holdoff": Attempt failed, don't retry until next jiffy. * "Dyntick with callbacks": Entering dyntick-idle despite callbacks. * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks. * "More callbacks": Still more callbacks, try again to clear them out. * "Callbacks drained": All callbacks processed, off to dyntick idle! * "Timer": Timer fired to cause CPU to continue processing callbacks. * "Demigrate": Timer fired on wrong CPU, woke up correct CPU. * "Cleanup after idle": Idle exited, timer canceled. */ TRACE_EVENT(rcu_prep_idle, TP_PROTO(const char *reason), TP_ARGS(reason), TP_STRUCT__entry( __field(const char *, reason) ), TP_fast_assign( __entry->reason = reason; ), TP_printk("%s", __entry->reason) ); /* * Tracepoint for the registration of a single RCU callback function. * The first argument is the type of RCU, the second argument is * a pointer to the RCU callback itself, the third element is the * number of lazy callbacks queued, and the fourth element is the * total number of callbacks queued. */ TRACE_EVENT(rcu_callback, TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy, long qlen), TP_ARGS(rcuname, rhp, qlen_lazy, qlen), TP_STRUCT__entry( __field(const char *, rcuname) __field(void *, rhp) __field(void *, func) __field(long, qlen_lazy) __field(long, qlen) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->rhp = rhp; __entry->func = rhp->func; __entry->qlen_lazy = qlen_lazy; __entry->qlen = qlen; ), TP_printk("%s rhp=%p func=%pf %ld/%ld", __entry->rcuname, __entry->rhp, __entry->func, __entry->qlen_lazy, __entry->qlen) ); /* * Tracepoint for the registration of a single RCU callback of the special * kfree() form. The first argument is the RCU type, the second argument * is a pointer to the RCU callback, the third argument is the offset * of the callback within the enclosing RCU-protected data structure, * the fourth argument is the number of lazy callbacks queued, and the * fifth argument is the total number of callbacks queued. */ TRACE_EVENT(rcu_kfree_callback, TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset, long qlen_lazy, long qlen), TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen), TP_STRUCT__entry( __field(const char *, rcuname) __field(void *, rhp) __field(unsigned long, offset) __field(long, qlen_lazy) __field(long, qlen) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->rhp = rhp; __entry->offset = offset; __entry->qlen_lazy = qlen_lazy; __entry->qlen = qlen; ), TP_printk("%s rhp=%p func=%ld %ld/%ld", __entry->rcuname, __entry->rhp, __entry->offset, __entry->qlen_lazy, __entry->qlen) ); /* * Tracepoint for marking the beginning of rcu_do_batch, performed to start * RCU callback invocation. The first argument is the RCU flavor, * the second is the number of lazy callbacks queued, the third is * the total number of callbacks queued, and the fourth argument is * the current RCU-callback batch limit.
*/ TRACE_EVENT(rcu_batch_start, TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit), TP_ARGS(rcuname, qlen_lazy, qlen, blimit), TP_STRUCT__entry( __field(const char *, rcuname) __field(long, qlen_lazy) __field(long, qlen) __field(long, blimit) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->qlen_lazy = qlen_lazy; __entry->qlen = qlen; __entry->blimit = blimit; ), TP_printk("%s CBs=%ld/%ld bl=%ld", __entry->rcuname, __entry->qlen_lazy, __entry->qlen, __entry->blimit) ); /* * Tracepoint for the invocation of a single RCU callback function. * The first argument is the type of RCU, and the second argument is * a pointer to the RCU callback itself. */ TRACE_EVENT(rcu_invoke_callback, TP_PROTO(const char *rcuname, struct rcu_head *rhp), TP_ARGS(rcuname, rhp), TP_STRUCT__entry( __field(const char *, rcuname) __field(void *, rhp) __field(void *, func) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->rhp = rhp; __entry->func = rhp->func; ), TP_printk("%s rhp=%p func=%pf", __entry->rcuname, __entry->rhp, __entry->func) ); /* * Tracepoint for the invocation of a single RCU callback of the special * kfree() form. The first argument is the RCU flavor, the second * argument is a pointer to the RCU callback, and the third argument * is the offset of the callback within the enclosing RCU-protected * data structure. */ TRACE_EVENT(rcu_invoke_kfree_callback, TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset), TP_ARGS(rcuname, rhp, offset), TP_STRUCT__entry( __field(const char *, rcuname) __field(void *, rhp) __field(unsigned long, offset) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->rhp = rhp; __entry->offset = offset; ), TP_printk("%s rhp=%p func=%ld", __entry->rcuname, __entry->rhp, __entry->offset) ); /* * Tracepoint for exiting rcu_do_batch after RCU callbacks have been * invoked. The first argument is the name of the RCU flavor, * the second argument is number of callbacks actually invoked, * the third argument (cb) is whether or not any of the callbacks that * were ready to invoke at the beginning of this batch are still * queued, the fourth argument (nr) is the return value of need_resched(), * the fifth argument (iit) is 1 if the current task is the idle task, * and the sixth argument (risk) is the return value from * rcu_is_callbacks_kthread(). */ TRACE_EVENT(rcu_batch_end, TP_PROTO(const char *rcuname, int callbacks_invoked, char cb, char nr, char iit, char risk), TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk), TP_STRUCT__entry( __field(const char *, rcuname) __field(int, callbacks_invoked) __field(char, cb) __field(char, nr) __field(char, iit) __field(char, risk) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->callbacks_invoked = callbacks_invoked; __entry->cb = cb; __entry->nr = nr; __entry->iit = iit; __entry->risk = risk; ), TP_printk("%s CBs-invoked=%d idle=%c%c%c%c", __entry->rcuname, __entry->callbacks_invoked, __entry->cb ? 'C' : '.', __entry->nr ? 'S' : '.', __entry->iit ? 'I' : '.', __entry->risk ? 'R' : '.') ); /* * Tracepoint for rcutorture readers. The first argument is the name * of the RCU flavor from rcutorture's viewpoint and the second argument * is the callback address. The third argument is the start time in * seconds, and the last two arguments are the grace period numbers * at the beginning and end of the read, respectively. Note that the * callback address can be NULL. 
*/ TRACE_EVENT(rcu_torture_read, TP_PROTO(const char *rcutorturename, struct rcu_head *rhp, unsigned long secs, unsigned long c_old, unsigned long c), TP_ARGS(rcutorturename, rhp, secs, c_old, c), TP_STRUCT__entry( __field(const char *, rcutorturename) __field(struct rcu_head *, rhp) __field(unsigned long, secs) __field(unsigned long, c_old) __field(unsigned long, c) ), TP_fast_assign( __entry->rcutorturename = rcutorturename; __entry->rhp = rhp; __entry->secs = secs; __entry->c_old = c_old; __entry->c = c; ), TP_printk("%s torture read %p %luus c: %lu %lu", __entry->rcutorturename, __entry->rhp, __entry->secs, __entry->c_old, __entry->c) ); /* * Tracepoint for _rcu_barrier() execution. The string "s" describes * the _rcu_barrier phase: * "Begin": _rcu_barrier() started. * "EarlyExit": _rcu_barrier() piggybacked, thus early exit. * "Inc1": _rcu_barrier() piggyback check counter incremented. * "OfflineNoCB": _rcu_barrier() found callback on never-online CPU * "OnlineNoCB": _rcu_barrier() found online no-CBs CPU. * "OnlineQ": _rcu_barrier() found online CPU with callbacks. * "OnlineNQ": _rcu_barrier() found online CPU, no callbacks. * "IRQ": An rcu_barrier_callback() callback posted on remote CPU. * "CB": An rcu_barrier_callback() invoked a callback, not the last. * "LastCB": An rcu_barrier_callback() invoked the last callback. * "Inc2": _rcu_barrier() piggyback check counter incremented. * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument * is the count of remaining callbacks, and "done" is the piggybacking count. */ TRACE_EVENT(rcu_barrier, TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done), TP_ARGS(rcuname, s, cpu, cnt, done), TP_STRUCT__entry( __field(const char *, rcuname) __field(const char *, s) __field(int, cpu) __field(int, cnt) __field(unsigned long, done) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->s = s; __entry->cpu = cpu; __entry->cnt = cnt; __entry->done = done; ), TP_printk("%s %s cpu %d remaining %d # %lu", __entry->rcuname, __entry->s, __entry->cpu, __entry->cnt, __entry->done) ); #else /* #ifdef CONFIG_RCU_TRACE */ #define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0) #define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \ level, grplo, grphi, event) \ do { } while (0) #define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \ qsmask) do { } while (0) #define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \ do { } while (0) #define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \ do { } while (0) #define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0) #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0) #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0) #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \ grplo, grphi, gp_tasks) do { } \ while (0) #define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0) #define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0) #define trace_rcu_prep_idle(reason) do { } while (0) #define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0) #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \ do { } while (0) #define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \ do { } while (0) #define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0) #define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0) #define 
trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \ do { } while (0) #define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ do { } while (0) #define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0) #endif /* #else #ifdef CONFIG_RCU_TRACE */ #endif /* _TRACE_RCU_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
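/*
 * Editor's illustrative sketch, not part of the original header: flavor code
 * emits these events by calling the generated trace_*() stubs with one of the
 * documented strings. The function below is hypothetical; real callers live
 * in kernel/rcu/ and must pass persistent strings, since only the pointer is
 * recorded in the trace buffer.
 */
static void example_note_gp_start(const char *rcuname, unsigned long gpnum)
{
	/* compiles down to a no-op unless the tracepoint is enabled */
	trace_rcu_grace_period(rcuname, gpnum, "start");
}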
/* ==== sample 199780: linux @ e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | split train_val | target 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 ==== */
/* dvb-usb-i2c.c is part of the DVB USB library. * * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) * see dvb-usb-init.c for copyright information. * * This file contains functions for (de-)initializing an I2C adapter. */ #include "dvb-usb-common.h" int dvb_usb_i2c_init(struct dvb_usb_device *d) { int ret = 0; if (!(d->props.caps & DVB_USB_IS_AN_I2C_ADAPTER)) return 0; if (d->props.i2c_algo == NULL) { err("no i2c algorithm specified"); return -EINVAL; } strlcpy(d->i2c_adap.name, d->desc->name, sizeof(d->i2c_adap.name)); d->i2c_adap.algo = d->props.i2c_algo; d->i2c_adap.algo_data = NULL; d->i2c_adap.dev.parent = &d->udev->dev; i2c_set_adapdata(&d->i2c_adap, d); if ((ret = i2c_add_adapter(&d->i2c_adap)) < 0) err("could not add i2c adapter"); d->state |= DVB_USB_STATE_I2C; return ret; } int dvb_usb_i2c_exit(struct dvb_usb_device *d) { if (d->state & DVB_USB_STATE_I2C) i2c_del_adapter(&d->i2c_adap); d->state &= ~DVB_USB_STATE_I2C; return 0; }
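/*
 * Editor's illustrative sketch, not part of the original file: for
 * dvb_usb_i2c_init() above to register an adapter, a device driver
 * advertises the capability and supplies an algorithm in its properties.
 * The demo_* names are hypothetical and the callback bodies are elided.
 */
static int demo_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
			 int num);		/* hypothetical transfer routine */
static u32 demo_i2c_func(struct i2c_adapter *adap);	/* e.g. returns I2C_FUNC_I2C */

static struct i2c_algorithm demo_i2c_algo = {
	.master_xfer   = demo_i2c_xfer,
	.functionality = demo_i2c_func,
};

static struct dvb_usb_device_properties demo_properties = {
	.caps     = DVB_USB_IS_AN_I2C_ADAPTER,	/* makes dvb_usb_i2c_init() run */
	.i2c_algo = &demo_i2c_algo,
	/* frontend/tuner attach callbacks omitted */
};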
/* ==== sample 22344: Chrome @ 796a0e014bc3985709c0a35538d606ef1da31e1b | split train_val | target 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 ==== */
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <stddef.h> #include <stdint.h> #include "base/files/file_util.h" #include "base/location.h" #include "base/macros.h" #include "base/message_loop/message_loop.h" #include "base/run_loop.h" #include "base/single_thread_task_runner.h" #include "base/threading/thread.h" #include "base/threading/thread_task_runner_handle.h" #include "components/services/leveldb/public/cpp/util.h" #include "content/browser/dom_storage/local_storage_database.pb.h" #include "content/browser/gpu/shader_cache_factory.h" #include "content/browser/storage_partition_impl.h" #include "content/public/browser/local_storage_usage_info.h" #include "content/public/browser/storage_partition.h" #include "content/public/test/test_browser_context.h" #include "content/public/test/test_browser_thread.h" #include "content/public/test/test_browser_thread_bundle.h" #include "content/test/fake_leveldb_database.h" #include "net/base/test_completion_callback.h" #include "net/cookies/canonical_cookie.h" #include "net/cookies/cookie_store.h" #include "net/url_request/url_request_context.h" #include "net/url_request/url_request_context_getter.h" #include "ppapi/buildflags/buildflags.h" #include "storage/browser/quota/quota_manager.h" #include "storage/browser/test/mock_quota_manager.h" #include "storage/browser/test/mock_special_storage_policy.h" #include "testing/gtest/include/gtest/gtest.h" #if BUILDFLAG(ENABLE_PLUGINS) #include "base/memory/ptr_util.h" #include "ppapi/shared_impl/ppapi_constants.h" // nogncheck #include "storage/browser/fileapi/async_file_util.h" #include "storage/browser/fileapi/file_system_context.h" #include "storage/browser/fileapi/file_system_operation_context.h" #include "storage/browser/fileapi/isolated_context.h" #include "storage/common/fileapi/file_system_util.h" #endif // BUILDFLAG(ENABLE_PLUGINS) using net::CanonicalCookie; namespace content { namespace { const int kDefaultClientId = 42; const char kCacheKey[] = "key"; const char kCacheValue[] = "cached value"; const char kTestOrigin1[] = "http://host1:1/"; const char kTestOrigin2[] = "http://host2:1/"; const char kTestOrigin3[] = "http://host3:1/"; const char kTestOriginDevTools[] = "chrome-devtools://abcdefghijklmnopqrstuvw/"; #if BUILDFLAG(ENABLE_PLUGINS) const char kWidevineCdmPluginId[] = "application_x-ppapi-widevine-cdm"; const char kClearKeyCdmPluginId[] = "application_x-ppapi-clearkey-cdm"; #endif // BUILDFLAG(ENABLE_PLUGINS) const GURL kOrigin1(kTestOrigin1); const GURL kOrigin2(kTestOrigin2); const GURL kOrigin3(kTestOrigin3); const GURL kOriginDevTools(kTestOriginDevTools); const blink::mojom::StorageType kTemporary = blink::mojom::StorageType::kTemporary; const blink::mojom::StorageType kPersistent = blink::mojom::StorageType::kPersistent; const storage::QuotaClient::ID kClientFile = storage::QuotaClient::kFileSystem; const uint32_t kAllQuotaRemoveMask = StoragePartition::REMOVE_DATA_MASK_APPCACHE | StoragePartition::REMOVE_DATA_MASK_FILE_SYSTEMS | StoragePartition::REMOVE_DATA_MASK_INDEXEDDB | StoragePartition::REMOVE_DATA_MASK_WEBSQL; bool AlwaysTrueCookiePredicate(const net::CanonicalCookie& cookie) { return true; } bool AlwaysFalseCookiePredicate(const net::CanonicalCookie& cookie) { return false; } class AwaitCompletionHelper { public: AwaitCompletionHelper() : start_(false), already_quit_(false) {} virtual ~AwaitCompletionHelper() {} void BlockUntilNotified() { if 
(!already_quit_) { DCHECK(!start_); start_ = true; base::RunLoop().Run(); } else { DCHECK(!start_); already_quit_ = false; } } void Notify() { if (start_) { DCHECK(!already_quit_); base::RunLoop::QuitCurrentWhenIdleDeprecated(); start_ = false; } else { DCHECK(!already_quit_); already_quit_ = true; } } private: // These flags avoid running the message loop when the callback was // invoked immediately. bool start_; bool already_quit_; DISALLOW_COPY_AND_ASSIGN(AwaitCompletionHelper); }; class RemoveCookieTester { public: explicit RemoveCookieTester(TestBrowserContext* context) : get_cookie_success_(false), cookie_store_(context->GetRequestContext() ->GetURLRequestContext() ->cookie_store()) {} // Returns true if the given cookie exists in the cookie store. bool ContainsCookie() { get_cookie_success_ = false; cookie_store_->GetCookieListWithOptionsAsync( kOrigin1, net::CookieOptions(), base::BindOnce(&RemoveCookieTester::GetCookieListCallback, base::Unretained(this))); await_completion_.BlockUntilNotified(); return get_cookie_success_; } void AddCookie() { cookie_store_->SetCookieWithOptionsAsync( kOrigin1, "A=1", net::CookieOptions(), base::BindOnce(&RemoveCookieTester::SetCookieCallback, base::Unretained(this))); await_completion_.BlockUntilNotified(); } private: void GetCookieListCallback(const net::CookieList& cookie_list) { std::string cookie_line = net::CanonicalCookie::BuildCookieLine(cookie_list); if (cookie_line == "A=1") { get_cookie_success_ = true; } else { EXPECT_EQ("", cookie_line); get_cookie_success_ = false; } await_completion_.Notify(); } void SetCookieCallback(bool result) { ASSERT_TRUE(result); await_completion_.Notify(); } bool get_cookie_success_; AwaitCompletionHelper await_completion_; net::CookieStore* cookie_store_; DISALLOW_COPY_AND_ASSIGN(RemoveCookieTester); }; class RemoveLocalStorageTester { public: explicit RemoveLocalStorageTester(TestBrowserContext* profile) : dom_storage_context_(nullptr), mock_db_(&mock_data_), db_binding_(&mock_db_) { dom_storage_context_ = content::BrowserContext::GetDefaultStoragePartition(profile)-> GetDOMStorageContext(); } // Returns true if the given origin URL exists. bool DOMStorageExistsForOrigin(const GURL& origin) { GetLocalStorageUsage(); await_completion_.BlockUntilNotified(); for (size_t i = 0; i < infos_.size(); ++i) { if (origin == infos_[i].origin) return true; } return false; } void AddDOMStorageTestData() { // Note: This test depends on details of how the dom_storage library // stores data in the database.
    leveldb::mojom::LevelDBDatabaseAssociatedPtr database_ptr;
    leveldb::mojom::LevelDBDatabaseAssociatedRequest request =
        MakeRequestAssociatedWithDedicatedPipe(&database_ptr);
    static_cast<DOMStorageContextWrapper*>(dom_storage_context_)
        ->SetLocalStorageDatabaseForTesting(std::move(database_ptr));
    db_binding_.Bind(std::move(request));

    LocalStorageOriginMetaData data;

    base::Time now = base::Time::Now();
    data.set_last_modified(now.ToInternalValue());
    data.set_size_bytes(16);
    mock_data_[CreateMetaDataKey(url::Origin::Create(kOrigin1))] =
        leveldb::StdStringToUint8Vector(data.SerializeAsString());
    mock_data_[CreateDataKey(url::Origin::Create(kOrigin1))] = {};

    base::Time one_day_ago = now - base::TimeDelta::FromDays(1);
    data.set_last_modified(one_day_ago.ToInternalValue());
    mock_data_[CreateMetaDataKey(url::Origin::Create(kOrigin2))] =
        leveldb::StdStringToUint8Vector(data.SerializeAsString());
    mock_data_[CreateDataKey(url::Origin::Create(kOrigin2))] = {};

    base::Time sixty_days_ago = now - base::TimeDelta::FromDays(60);
    data.set_last_modified(sixty_days_ago.ToInternalValue());
    mock_data_[CreateMetaDataKey(url::Origin::Create(kOrigin3))] =
        leveldb::StdStringToUint8Vector(data.SerializeAsString());
    mock_data_[CreateDataKey(url::Origin::Create(kOrigin3))] = {};
  }

 private:
  std::vector<uint8_t> CreateDataKey(const url::Origin& origin) {
    auto serialized_origin =
        leveldb::StdStringToUint8Vector(origin.Serialize());
    std::vector<uint8_t> key = {'_'};
    key.insert(key.end(), serialized_origin.begin(), serialized_origin.end());
    key.push_back(0);
    key.push_back('X');
    return key;
  }

  std::vector<uint8_t> CreateMetaDataKey(const url::Origin& origin) {
    const uint8_t kMetaPrefix[] = {'M', 'E', 'T', 'A', ':'};
    auto serialized_origin =
        leveldb::StdStringToUint8Vector(origin.Serialize());
    std::vector<uint8_t> key;
    key.reserve(arraysize(kMetaPrefix) + serialized_origin.size());
    key.insert(key.end(), kMetaPrefix, kMetaPrefix + arraysize(kMetaPrefix));
    key.insert(key.end(), serialized_origin.begin(), serialized_origin.end());
    return key;
  }

  void GetLocalStorageUsage() {
    dom_storage_context_->GetLocalStorageUsage(
        base::Bind(&RemoveLocalStorageTester::OnGotLocalStorageUsage,
                   base::Unretained(this)));
  }

  void OnGotLocalStorageUsage(
      const std::vector<content::LocalStorageUsageInfo>& infos) {
    infos_ = infos;
    await_completion_.Notify();
  }

  // We don't own these pointers.
  content::DOMStorageContext* dom_storage_context_;

  std::map<std::vector<uint8_t>, std::vector<uint8_t>> mock_data_;
  FakeLevelDBDatabase mock_db_;
  mojo::AssociatedBinding<leveldb::mojom::LevelDBDatabase> db_binding_;

  std::vector<content::LocalStorageUsageInfo> infos_;
  AwaitCompletionHelper await_completion_;

  DISALLOW_COPY_AND_ASSIGN(RemoveLocalStorageTester);
};

#if BUILDFLAG(ENABLE_PLUGINS)
class RemovePluginPrivateDataTester {
 public:
  explicit RemovePluginPrivateDataTester(
      storage::FileSystemContext* filesystem_context)
      : filesystem_context_(filesystem_context) {}

  // Add some files to the PluginPrivateFileSystem. They are created as
  // follows:
  //   kOrigin1 - ClearKey - 1 file - timestamp 10 days ago
  //   kOrigin2 - Widevine - 2 files - timestamps now and 60 days ago
  void AddPluginPrivateTestData() {
    base::Time now = base::Time::Now();
    base::Time ten_days_ago = now - base::TimeDelta::FromDays(10);
    base::Time sixty_days_ago = now - base::TimeDelta::FromDays(60);

    // Create a PluginPrivateFileSystem for ClearKey and add a single file
    // with a timestamp of 10 days ago.
    std::string clearkey_fsid =
        CreateFileSystem(kClearKeyCdmPluginId, kOrigin1);
    clearkey_file_ = CreateFile(kOrigin1, clearkey_fsid, "foo");
    SetFileTimestamp(clearkey_file_, ten_days_ago);

    // Create a second PluginPrivateFileSystem for Widevine and add two files
    // with different times.
    std::string widevine_fsid =
        CreateFileSystem(kWidevineCdmPluginId, kOrigin2);
    storage::FileSystemURL widevine_file1 =
        CreateFile(kOrigin2, widevine_fsid, "bar1");
    storage::FileSystemURL widevine_file2 =
        CreateFile(kOrigin2, widevine_fsid, "bar2");
    SetFileTimestamp(widevine_file1, now);
    SetFileTimestamp(widevine_file2, sixty_days_ago);
  }

  void DeleteClearKeyTestData() { DeleteFile(clearkey_file_); }

  // Returns true if the given origin exists in a PluginPrivateFileSystem.
  bool DataExistsForOrigin(const GURL& origin) {
    AwaitCompletionHelper await_completion;
    bool data_exists_for_origin = false;
    filesystem_context_->default_file_task_runner()->PostTask(
        FROM_HERE,
        base::BindOnce(&RemovePluginPrivateDataTester::
                           CheckIfDataExistsForOriginOnFileTaskRunner,
                       base::Unretained(this), origin,
                       &data_exists_for_origin, &await_completion));
    await_completion.BlockUntilNotified();
    return data_exists_for_origin;
  }

  // Opens the file created for ClearKey (in kOrigin1) for writing. The
  // caller needs to verify whether the file was actually opened.
  base::File OpenClearKeyFileForWrite() {
    AwaitCompletionHelper await_completion;
    base::File file;
    storage::AsyncFileUtil* async_file_util =
        filesystem_context_->GetAsyncFileUtil(
            storage::kFileSystemTypePluginPrivate);
    std::unique_ptr<storage::FileSystemOperationContext> operation_context =
        std::make_unique<storage::FileSystemOperationContext>(
            filesystem_context_);
    async_file_util->CreateOrOpen(
        std::move(operation_context), clearkey_file_,
        base::File::FLAG_OPEN | base::File::FLAG_WRITE,
        base::BindOnce(&RemovePluginPrivateDataTester::OnFileOpened,
                       base::Unretained(this), &file, &await_completion));
    await_completion.BlockUntilNotified();
    return file;
  }

 private:
  // Creates a PluginPrivateFileSystem for the |plugin_name| and |origin|
  // provided. Returns the file system ID for the created
  // PluginPrivateFileSystem.
  std::string CreateFileSystem(const std::string& plugin_name,
                               const GURL& origin) {
    AwaitCompletionHelper await_completion;
    std::string fsid =
        storage::IsolatedContext::GetInstance()
            ->RegisterFileSystemForVirtualPath(
                storage::kFileSystemTypePluginPrivate,
                ppapi::kPluginPrivateRootName, base::FilePath());
    EXPECT_TRUE(storage::ValidateIsolatedFileSystemId(fsid));
    filesystem_context_->OpenPluginPrivateFileSystem(
        origin, storage::kFileSystemTypePluginPrivate, fsid, plugin_name,
        storage::OPEN_FILE_SYSTEM_CREATE_IF_NONEXISTENT,
        base::BindOnce(&RemovePluginPrivateDataTester::OnFileSystemOpened,
                       base::Unretained(this), &await_completion));
    await_completion.BlockUntilNotified();
    return fsid;
  }

  // Creates a file named |file_name| in the PluginPrivateFileSystem
  // identified by |origin| and |fsid|. Returns the URL for the created file.
  // The file must not already exist or the test will fail.
storage::FileSystemURL CreateFile(const GURL& origin, const std::string& fsid, const std::string& file_name) { AwaitCompletionHelper await_completion; std::string root = storage::GetIsolatedFileSystemRootURIString( origin, fsid, ppapi::kPluginPrivateRootName); storage::FileSystemURL file_url = filesystem_context_->CrackURL(GURL(root + file_name)); storage::AsyncFileUtil* file_util = filesystem_context_->GetAsyncFileUtil( storage::kFileSystemTypePluginPrivate); std::unique_ptr<storage::FileSystemOperationContext> operation_context = std::make_unique<storage::FileSystemOperationContext>( filesystem_context_); operation_context->set_allowed_bytes_growth( storage::QuotaManager::kNoLimit); file_util->EnsureFileExists( std::move(operation_context), file_url, base::BindOnce(&RemovePluginPrivateDataTester::OnFileCreated, base::Unretained(this), &await_completion)); await_completion.BlockUntilNotified(); return file_url; } void DeleteFile(storage::FileSystemURL file_url) { AwaitCompletionHelper await_completion; storage::AsyncFileUtil* file_util = filesystem_context_->GetAsyncFileUtil( storage::kFileSystemTypePluginPrivate); std::unique_ptr<storage::FileSystemOperationContext> operation_context = std::make_unique<storage::FileSystemOperationContext>( filesystem_context_); file_util->DeleteFile( std::move(operation_context), file_url, base::BindOnce(&RemovePluginPrivateDataTester::OnFileDeleted, base::Unretained(this), &await_completion)); await_completion.BlockUntilNotified(); } // Sets the last_access_time and last_modified_time to |time_stamp| on the // file specified by |file_url|. The file must already exist. void SetFileTimestamp(const storage::FileSystemURL& file_url, const base::Time& time_stamp) { AwaitCompletionHelper await_completion; storage::AsyncFileUtil* file_util = filesystem_context_->GetAsyncFileUtil( storage::kFileSystemTypePluginPrivate); std::unique_ptr<storage::FileSystemOperationContext> operation_context = std::make_unique<storage::FileSystemOperationContext>( filesystem_context_); file_util->Touch( std::move(operation_context), file_url, time_stamp, time_stamp, base::BindOnce(&RemovePluginPrivateDataTester::OnFileTouched, base::Unretained(this), &await_completion)); await_completion.BlockUntilNotified(); } void OnFileSystemOpened(AwaitCompletionHelper* await_completion, base::File::Error result) { EXPECT_EQ(base::File::FILE_OK, result) << base::File::ErrorToString(result); await_completion->Notify(); } void OnFileCreated(AwaitCompletionHelper* await_completion, base::File::Error result, bool created) { EXPECT_EQ(base::File::FILE_OK, result) << base::File::ErrorToString(result); EXPECT_TRUE(created); await_completion->Notify(); } void OnFileDeleted(AwaitCompletionHelper* await_completion, base::File::Error result) { EXPECT_EQ(base::File::FILE_OK, result) << base::File::ErrorToString(result); await_completion->Notify(); } void OnFileTouched(AwaitCompletionHelper* await_completion, base::File::Error result) { EXPECT_EQ(base::File::FILE_OK, result) << base::File::ErrorToString(result); await_completion->Notify(); } void OnFileOpened(base::File* file_result, AwaitCompletionHelper* await_completion, base::File file, base::OnceClosure on_close_callback) { *file_result = std::move(file); await_completion->Notify(); } // If |origin| exists in the PluginPrivateFileSystem, set // |data_exists_for_origin| to true, false otherwise. 
void CheckIfDataExistsForOriginOnFileTaskRunner( const GURL& origin, bool* data_exists_for_origin, AwaitCompletionHelper* await_completion) { storage::FileSystemBackend* backend = filesystem_context_->GetFileSystemBackend( storage::kFileSystemTypePluginPrivate); storage::FileSystemQuotaUtil* quota_util = backend->GetQuotaUtil(); // Determine the set of origins used. std::set<GURL> origins; quota_util->GetOriginsForTypeOnFileTaskRunner( storage::kFileSystemTypePluginPrivate, &origins); *data_exists_for_origin = origins.find(origin) != origins.end(); // AwaitCompletionHelper and MessageLoop don't work on a // SequencedTaskRunner, so post a task on the IO thread. BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, base::BindOnce(&AwaitCompletionHelper::Notify, base::Unretained(await_completion))); } // We don't own this pointer. storage::FileSystemContext* filesystem_context_; // Keep track of the URL for the ClearKey file so that it can be written to // or deleted. storage::FileSystemURL clearkey_file_; DISALLOW_COPY_AND_ASSIGN(RemovePluginPrivateDataTester); }; #endif // BUILDFLAG(ENABLE_PLUGINS) bool IsWebSafeSchemeForTest(const std::string& scheme) { return scheme == "http"; } bool DoesOriginMatchForUnprotectedWeb( const GURL& origin, storage::SpecialStoragePolicy* special_storage_policy) { if (IsWebSafeSchemeForTest(origin.scheme())) return !special_storage_policy->IsStorageProtected(origin.GetOrigin()); return false; } bool DoesOriginMatchForBothProtectedAndUnprotectedWeb( const GURL& origin, storage::SpecialStoragePolicy* special_storage_policy) { return true; } bool DoesOriginMatchUnprotected( const GURL& origin, storage::SpecialStoragePolicy* special_storage_policy) { return origin.GetOrigin().scheme() != kOriginDevTools.scheme(); } void ClearQuotaData(content::StoragePartition* partition, base::RunLoop* loop_to_quit) { partition->ClearData(kAllQuotaRemoveMask, StoragePartition::QUOTA_MANAGED_STORAGE_MASK_ALL, GURL(), StoragePartition::OriginMatcherFunction(), base::Time(), base::Time::Max(), loop_to_quit->QuitClosure()); } void ClearQuotaDataWithOriginMatcher( content::StoragePartition* partition, const GURL& remove_origin, const StoragePartition::OriginMatcherFunction& origin_matcher, const base::Time delete_begin, base::RunLoop* loop_to_quit) { partition->ClearData(kAllQuotaRemoveMask, StoragePartition::QUOTA_MANAGED_STORAGE_MASK_ALL, remove_origin, origin_matcher, delete_begin, base::Time::Max(), loop_to_quit->QuitClosure()); } void ClearQuotaDataForOrigin( content::StoragePartition* partition, const GURL& remove_origin, const base::Time delete_begin, base::RunLoop* loop_to_quit) { ClearQuotaDataWithOriginMatcher( partition, remove_origin, StoragePartition::OriginMatcherFunction(), delete_begin, loop_to_quit); } void ClearQuotaDataForNonPersistent( content::StoragePartition* partition, const base::Time delete_begin, base::RunLoop* loop_to_quit) { partition->ClearData( kAllQuotaRemoveMask, ~StoragePartition::QUOTA_MANAGED_STORAGE_MASK_PERSISTENT, GURL(), StoragePartition::OriginMatcherFunction(), delete_begin, base::Time::Max(), loop_to_quit->QuitClosure()); } void ClearCookies(content::StoragePartition* partition, const base::Time delete_begin, const base::Time delete_end, base::RunLoop* run_loop) { partition->ClearData( StoragePartition::REMOVE_DATA_MASK_COOKIES, StoragePartition::QUOTA_MANAGED_STORAGE_MASK_ALL, GURL(), StoragePartition::OriginMatcherFunction(), delete_begin, delete_end, run_loop->QuitClosure()); } void ClearCookiesWithMatcher( content::StoragePartition* 
partition, const base::Time delete_begin, const base::Time delete_end, const StoragePartition::CookieMatcherFunction& cookie_matcher, base::RunLoop* run_loop) { partition->ClearData(StoragePartition::REMOVE_DATA_MASK_COOKIES, StoragePartition::QUOTA_MANAGED_STORAGE_MASK_ALL, StoragePartition::OriginMatcherFunction(), cookie_matcher, delete_begin, delete_end, run_loop->QuitClosure()); } void ClearStuff(uint32_t remove_mask, content::StoragePartition* partition, const base::Time delete_begin, const base::Time delete_end, const StoragePartition::OriginMatcherFunction& origin_matcher, base::RunLoop* run_loop) { partition->ClearData( remove_mask, StoragePartition::QUOTA_MANAGED_STORAGE_MASK_ALL, GURL(), origin_matcher, delete_begin, delete_end, run_loop->QuitClosure()); } void ClearData(content::StoragePartition* partition, base::RunLoop* run_loop) { base::Time time; partition->ClearData( StoragePartition::REMOVE_DATA_MASK_SHADER_CACHE, StoragePartition::QUOTA_MANAGED_STORAGE_MASK_ALL, GURL(), StoragePartition::OriginMatcherFunction(), time, time, run_loop->QuitClosure()); } #if BUILDFLAG(ENABLE_PLUGINS) void ClearPluginPrivateData(content::StoragePartition* partition, const GURL& storage_origin, const base::Time delete_begin, const base::Time delete_end, base::RunLoop* run_loop) { partition->ClearData( StoragePartitionImpl::REMOVE_DATA_MASK_PLUGIN_PRIVATE_DATA, StoragePartition::QUOTA_MANAGED_STORAGE_MASK_ALL, storage_origin, StoragePartition::OriginMatcherFunction(), delete_begin, delete_end, run_loop->QuitClosure()); } #endif // BUILDFLAG(ENABLE_PLUGINS) } // namespace class StoragePartitionImplTest : public testing::Test { public: StoragePartitionImplTest() : thread_bundle_(content::TestBrowserThreadBundle::IO_MAINLOOP), browser_context_(new TestBrowserContext()) {} MockQuotaManager* GetMockManager() { if (!quota_manager_.get()) { quota_manager_ = new MockQuotaManager( browser_context_->IsOffTheRecord(), browser_context_->GetPath(), BrowserThread::GetTaskRunnerForThread(BrowserThread::IO).get(), browser_context_->GetSpecialStoragePolicy()); } return quota_manager_.get(); } TestBrowserContext* browser_context() { return browser_context_.get(); } private: content::TestBrowserThreadBundle thread_bundle_; std::unique_ptr<TestBrowserContext> browser_context_; scoped_refptr<MockQuotaManager> quota_manager_; DISALLOW_COPY_AND_ASSIGN(StoragePartitionImplTest); }; class StoragePartitionShaderClearTest : public testing::Test { public: StoragePartitionShaderClearTest() : thread_bundle_(content::TestBrowserThreadBundle::IO_MAINLOOP), browser_context_(new TestBrowserContext()) { InitShaderCacheFactorySingleton(base::ThreadTaskRunnerHandle::Get()); GetShaderCacheFactorySingleton()->SetCacheInfo( kDefaultClientId, BrowserContext::GetDefaultStoragePartition(browser_context()) ->GetPath()); cache_ = GetShaderCacheFactorySingleton()->Get(kDefaultClientId); } ~StoragePartitionShaderClearTest() override { cache_ = nullptr; GetShaderCacheFactorySingleton()->RemoveCacheInfo(kDefaultClientId); } void InitCache() { net::TestCompletionCallback available_cb; int rv = cache_->SetAvailableCallback(available_cb.callback()); ASSERT_EQ(net::OK, available_cb.GetResult(rv)); EXPECT_EQ(0, cache_->Size()); cache_->Cache(kCacheKey, kCacheValue); net::TestCompletionCallback complete_cb; rv = cache_->SetCacheCompleteCallback(complete_cb.callback()); ASSERT_EQ(net::OK, complete_cb.GetResult(rv)); } size_t Size() { return cache_->Size(); } TestBrowserContext* browser_context() { return browser_context_.get(); } private: 
content::TestBrowserThreadBundle thread_bundle_; std::unique_ptr<TestBrowserContext> browser_context_; scoped_refptr<gpu::ShaderDiskCache> cache_; }; // Tests --------------------------------------------------------------------- TEST_F(StoragePartitionShaderClearTest, ClearShaderCache) { InitCache(); EXPECT_EQ(1u, Size()); base::RunLoop run_loop; base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&ClearData, BrowserContext::GetDefaultStoragePartition( browser_context()), &run_loop)); run_loop.Run(); EXPECT_EQ(0u, Size()); } TEST_F(StoragePartitionImplTest, QuotaClientMaskGeneration) { EXPECT_EQ(storage::QuotaClient::kFileSystem, StoragePartitionImpl::GenerateQuotaClientMask( StoragePartition::REMOVE_DATA_MASK_FILE_SYSTEMS)); EXPECT_EQ(storage::QuotaClient::kDatabase, StoragePartitionImpl::GenerateQuotaClientMask( StoragePartition::REMOVE_DATA_MASK_WEBSQL)); EXPECT_EQ(storage::QuotaClient::kAppcache, StoragePartitionImpl::GenerateQuotaClientMask( StoragePartition::REMOVE_DATA_MASK_APPCACHE)); EXPECT_EQ(storage::QuotaClient::kIndexedDatabase, StoragePartitionImpl::GenerateQuotaClientMask( StoragePartition::REMOVE_DATA_MASK_INDEXEDDB)); EXPECT_EQ(storage::QuotaClient::kFileSystem | storage::QuotaClient::kDatabase | storage::QuotaClient::kAppcache | storage::QuotaClient::kIndexedDatabase, StoragePartitionImpl::GenerateQuotaClientMask(kAllQuotaRemoveMask)); } void PopulateTestQuotaManagedPersistentData(MockQuotaManager* manager) { manager->AddOrigin(kOrigin2, kPersistent, kClientFile, base::Time()); manager->AddOrigin(kOrigin3, kPersistent, kClientFile, base::Time::Now() - base::TimeDelta::FromDays(1)); EXPECT_FALSE(manager->OriginHasData(kOrigin1, kPersistent, kClientFile)); EXPECT_TRUE(manager->OriginHasData(kOrigin2, kPersistent, kClientFile)); EXPECT_TRUE(manager->OriginHasData(kOrigin3, kPersistent, kClientFile)); } void PopulateTestQuotaManagedTemporaryData(MockQuotaManager* manager) { manager->AddOrigin(kOrigin1, kTemporary, kClientFile, base::Time::Now()); manager->AddOrigin(kOrigin3, kTemporary, kClientFile, base::Time::Now() - base::TimeDelta::FromDays(1)); EXPECT_TRUE(manager->OriginHasData(kOrigin1, kTemporary, kClientFile)); EXPECT_FALSE(manager->OriginHasData(kOrigin2, kTemporary, kClientFile)); EXPECT_TRUE(manager->OriginHasData(kOrigin3, kTemporary, kClientFile)); } void PopulateTestQuotaManagedData(MockQuotaManager* manager) { // Set up kOrigin1 with a temporary quota, kOrigin2 with a persistent // quota, and kOrigin3 with both. kOrigin1 is modified now, kOrigin2 // is modified at the beginning of time, and kOrigin3 is modified one day // ago. 
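  // Resulting fixture state, by origin and storage type:
  //
  //              temporary       persistent
  //   kOrigin1   now             -
  //   kOrigin2   -               epoch (base::Time())
  //   kOrigin3   one day ago     one day ago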
PopulateTestQuotaManagedPersistentData(manager); PopulateTestQuotaManagedTemporaryData(manager); } void PopulateTestQuotaManagedNonBrowsingData(MockQuotaManager* manager) { manager->AddOrigin(kOriginDevTools, kTemporary, kClientFile, base::Time()); manager->AddOrigin(kOriginDevTools, kPersistent, kClientFile, base::Time()); } TEST_F(StoragePartitionImplTest, RemoveQuotaManagedDataForeverBoth) { PopulateTestQuotaManagedData(GetMockManager()); StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>( BrowserContext::GetDefaultStoragePartition(browser_context())); partition->OverrideQuotaManagerForTesting( GetMockManager()); base::RunLoop run_loop; base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&ClearQuotaData, partition, &run_loop)); run_loop.Run(); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin2, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin3, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kPersistent, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin2, kPersistent, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin3, kPersistent, kClientFile)); } TEST_F(StoragePartitionImplTest, RemoveQuotaManagedDataForeverOnlyTemporary) { PopulateTestQuotaManagedTemporaryData(GetMockManager()); StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>( BrowserContext::GetDefaultStoragePartition(browser_context())); partition->OverrideQuotaManagerForTesting( GetMockManager()); base::RunLoop run_loop; base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&ClearQuotaData, partition, &run_loop)); run_loop.Run(); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin2, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin3, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kPersistent, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin2, kPersistent, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin3, kPersistent, kClientFile)); } TEST_F(StoragePartitionImplTest, RemoveQuotaManagedDataForeverOnlyPersistent) { PopulateTestQuotaManagedPersistentData(GetMockManager()); StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>( BrowserContext::GetDefaultStoragePartition(browser_context())); partition->OverrideQuotaManagerForTesting( GetMockManager()); base::RunLoop run_loop; base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&ClearQuotaData, partition, &run_loop)); run_loop.Run(); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin2, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin3, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kPersistent, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin2, kPersistent, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin3, kPersistent, kClientFile)); } TEST_F(StoragePartitionImplTest, RemoveQuotaManagedDataForeverNeither) { StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>( BrowserContext::GetDefaultStoragePartition(browser_context())); partition->OverrideQuotaManagerForTesting( GetMockManager()); base::RunLoop run_loop; 
base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&ClearQuotaData, partition, &run_loop)); run_loop.Run(); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin2, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin3, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kPersistent, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin2, kPersistent, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin3, kPersistent, kClientFile)); } TEST_F(StoragePartitionImplTest, RemoveQuotaManagedDataForeverSpecificOrigin) { PopulateTestQuotaManagedData(GetMockManager()); StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>( BrowserContext::GetDefaultStoragePartition(browser_context())); partition->OverrideQuotaManagerForTesting( GetMockManager()); base::RunLoop run_loop; base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&ClearQuotaDataForOrigin, partition, kOrigin1, base::Time(), &run_loop)); run_loop.Run(); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin2, kTemporary, kClientFile)); EXPECT_TRUE(GetMockManager()->OriginHasData(kOrigin3, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kPersistent, kClientFile)); EXPECT_TRUE(GetMockManager()->OriginHasData(kOrigin2, kPersistent, kClientFile)); EXPECT_TRUE(GetMockManager()->OriginHasData(kOrigin3, kPersistent, kClientFile)); } TEST_F(StoragePartitionImplTest, RemoveQuotaManagedDataForLastHour) { PopulateTestQuotaManagedData(GetMockManager()); StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>( BrowserContext::GetDefaultStoragePartition(browser_context())); partition->OverrideQuotaManagerForTesting( GetMockManager()); base::RunLoop run_loop; base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&ClearQuotaDataForOrigin, partition, GURL(), base::Time::Now() - base::TimeDelta::FromHours(1), &run_loop)); run_loop.Run(); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin2, kTemporary, kClientFile)); EXPECT_TRUE(GetMockManager()->OriginHasData(kOrigin3, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kPersistent, kClientFile)); EXPECT_TRUE(GetMockManager()->OriginHasData(kOrigin2, kPersistent, kClientFile)); EXPECT_TRUE(GetMockManager()->OriginHasData(kOrigin3, kPersistent, kClientFile)); } TEST_F(StoragePartitionImplTest, RemoveQuotaManagedDataForLastWeek) { PopulateTestQuotaManagedData(GetMockManager()); base::RunLoop run_loop; StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>( BrowserContext::GetDefaultStoragePartition(browser_context())); partition->OverrideQuotaManagerForTesting( GetMockManager()); base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&ClearQuotaDataForNonPersistent, partition, base::Time::Now() - base::TimeDelta::FromDays(7), &run_loop)); run_loop.Run(); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin2, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin3, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kPersistent, kClientFile)); 
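  // The non-persistent mask used above
  // (~QUOTA_MANAGED_STORAGE_MASK_PERSISTENT) leaves persistent storage
  // untouched, so the persistent entries for kOrigin2 and kOrigin3 survive
  // regardless of their modification times.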
EXPECT_TRUE(GetMockManager()->OriginHasData(kOrigin2, kPersistent, kClientFile)); EXPECT_TRUE(GetMockManager()->OriginHasData(kOrigin3, kPersistent, kClientFile)); } TEST_F(StoragePartitionImplTest, RemoveQuotaManagedUnprotectedOrigins) { // Protect kOrigin1. scoped_refptr<MockSpecialStoragePolicy> mock_policy = new MockSpecialStoragePolicy; mock_policy->AddProtected(kOrigin1.GetOrigin()); PopulateTestQuotaManagedData(GetMockManager()); StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>( BrowserContext::GetDefaultStoragePartition(browser_context())); partition->OverrideQuotaManagerForTesting( GetMockManager()); partition->OverrideSpecialStoragePolicyForTesting(mock_policy.get()); base::RunLoop run_loop; base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&ClearQuotaDataWithOriginMatcher, partition, GURL(), base::Bind(&DoesOriginMatchForUnprotectedWeb), base::Time(), &run_loop)); run_loop.Run(); EXPECT_TRUE(GetMockManager()->OriginHasData(kOrigin1, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin2, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin3, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kPersistent, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin2, kPersistent, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin3, kPersistent, kClientFile)); } TEST_F(StoragePartitionImplTest, RemoveQuotaManagedProtectedSpecificOrigin) { // Protect kOrigin1. scoped_refptr<MockSpecialStoragePolicy> mock_policy = new MockSpecialStoragePolicy; mock_policy->AddProtected(kOrigin1.GetOrigin()); PopulateTestQuotaManagedData(GetMockManager()); StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>( BrowserContext::GetDefaultStoragePartition(browser_context())); partition->OverrideQuotaManagerForTesting( GetMockManager()); partition->OverrideSpecialStoragePolicyForTesting(mock_policy.get()); // Try to remove kOrigin1. Expect failure. base::RunLoop run_loop; base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&ClearQuotaDataWithOriginMatcher, partition, kOrigin1, base::Bind(&DoesOriginMatchForUnprotectedWeb), base::Time(), &run_loop)); run_loop.Run(); EXPECT_TRUE(GetMockManager()->OriginHasData(kOrigin1, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin2, kTemporary, kClientFile)); EXPECT_TRUE(GetMockManager()->OriginHasData(kOrigin3, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kPersistent, kClientFile)); EXPECT_TRUE(GetMockManager()->OriginHasData(kOrigin2, kPersistent, kClientFile)); EXPECT_TRUE(GetMockManager()->OriginHasData(kOrigin3, kPersistent, kClientFile)); } TEST_F(StoragePartitionImplTest, RemoveQuotaManagedProtectedOrigins) { // Protect kOrigin1. scoped_refptr<MockSpecialStoragePolicy> mock_policy = new MockSpecialStoragePolicy; mock_policy->AddProtected(kOrigin1.GetOrigin()); PopulateTestQuotaManagedData(GetMockManager()); // Try to remove kOrigin1. Expect success. 
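  // (Success is expected because the matcher passed below,
  // DoesOriginMatchForBothProtectedAndUnprotectedWeb, matches every origin,
  // making the special storage policy's protection of kOrigin1 irrelevant.)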
base::RunLoop run_loop; StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>( BrowserContext::GetDefaultStoragePartition(browser_context())); partition->OverrideQuotaManagerForTesting( GetMockManager()); partition->OverrideSpecialStoragePolicyForTesting(mock_policy.get()); base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce( &ClearQuotaDataWithOriginMatcher, partition, GURL(), base::Bind(&DoesOriginMatchForBothProtectedAndUnprotectedWeb), base::Time(), &run_loop)); run_loop.Run(); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin2, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin3, kTemporary, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin1, kPersistent, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin2, kPersistent, kClientFile)); EXPECT_FALSE(GetMockManager()->OriginHasData(kOrigin3, kPersistent, kClientFile)); } TEST_F(StoragePartitionImplTest, RemoveQuotaManagedIgnoreDevTools) { PopulateTestQuotaManagedNonBrowsingData(GetMockManager()); base::RunLoop run_loop; StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>( BrowserContext::GetDefaultStoragePartition(browser_context())); partition->OverrideQuotaManagerForTesting( GetMockManager()); base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&ClearQuotaDataWithOriginMatcher, partition, GURL(), base::Bind(&DoesOriginMatchUnprotected), base::Time(), &run_loop)); run_loop.Run(); // Check that devtools data isn't removed. EXPECT_TRUE(GetMockManager()->OriginHasData(kOriginDevTools, kTemporary, kClientFile)); EXPECT_TRUE(GetMockManager()->OriginHasData(kOriginDevTools, kPersistent, kClientFile)); } TEST_F(StoragePartitionImplTest, RemoveCookieForever) { RemoveCookieTester tester(browser_context()); tester.AddCookie(); ASSERT_TRUE(tester.ContainsCookie()); StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>( BrowserContext::GetDefaultStoragePartition(browser_context())); partition->SetURLRequestContext(browser_context()->GetRequestContext()); base::RunLoop run_loop; base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&ClearCookies, partition, base::Time(), base::Time::Max(), &run_loop)); run_loop.Run(); EXPECT_FALSE(tester.ContainsCookie()); } TEST_F(StoragePartitionImplTest, RemoveCookieLastHour) { RemoveCookieTester tester(browser_context()); tester.AddCookie(); ASSERT_TRUE(tester.ContainsCookie()); StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>( BrowserContext::GetDefaultStoragePartition(browser_context())); base::Time an_hour_ago = base::Time::Now() - base::TimeDelta::FromHours(1); partition->SetURLRequestContext(browser_context()->GetRequestContext()); base::RunLoop run_loop; base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&ClearCookies, partition, an_hour_ago, base::Time::Max(), &run_loop)); run_loop.Run(); EXPECT_FALSE(tester.ContainsCookie()); } TEST_F(StoragePartitionImplTest, RemoveCookieWithMatcher) { RemoveCookieTester tester(browser_context()); StoragePartition::CookieMatcherFunction true_predicate = base::Bind(&AlwaysTrueCookiePredicate); StoragePartition::CookieMatcherFunction false_predicate = base::Bind(&AlwaysFalseCookiePredicate); tester.AddCookie(); ASSERT_TRUE(tester.ContainsCookie()); StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>( 
      BrowserContext::GetDefaultStoragePartition(browser_context()));
  partition->SetURLRequestContext(browser_context()->GetRequestContext());

  // Return false from our predicate and make sure the cookie is still
  // around.
  base::RunLoop run_loop;
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::BindOnce(&ClearCookiesWithMatcher, partition, base::Time(),
                     base::Time::Max(), std::move(false_predicate),
                     &run_loop));
  run_loop.RunUntilIdle();
  EXPECT_TRUE(tester.ContainsCookie());

  // Now we return true from our predicate.
  base::RunLoop run_loop2;
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::BindOnce(&ClearCookiesWithMatcher, partition, base::Time(),
                     base::Time::Max(), std::move(true_predicate),
                     &run_loop2));
  run_loop2.RunUntilIdle();
  EXPECT_FALSE(tester.ContainsCookie());
}

TEST_F(StoragePartitionImplTest, RemoveUnprotectedLocalStorageForever) {
  // Protect kOrigin1.
  scoped_refptr<MockSpecialStoragePolicy> mock_policy =
      new MockSpecialStoragePolicy;
  mock_policy->AddProtected(kOrigin1.GetOrigin());

  RemoveLocalStorageTester tester(browser_context());

  tester.AddDOMStorageTestData();
  EXPECT_TRUE(tester.DOMStorageExistsForOrigin(kOrigin1));
  EXPECT_TRUE(tester.DOMStorageExistsForOrigin(kOrigin2));
  EXPECT_TRUE(tester.DOMStorageExistsForOrigin(kOrigin3));

  StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>(
      BrowserContext::GetDefaultStoragePartition(browser_context()));
  partition->OverrideSpecialStoragePolicyForTesting(mock_policy.get());

  base::RunLoop run_loop;
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::BindOnce(&ClearStuff,
                     StoragePartitionImpl::REMOVE_DATA_MASK_LOCAL_STORAGE,
                     partition, base::Time(), base::Time::Max(),
                     base::Bind(&DoesOriginMatchForUnprotectedWeb),
                     &run_loop));
  run_loop.Run();
  // ClearData only guarantees that tasks to delete data are scheduled when
  // its callback is invoked. It doesn't guarantee data has actually been
  // cleared. So run all scheduled tasks to make sure data is cleared.
  base::RunLoop().RunUntilIdle();

  EXPECT_TRUE(tester.DOMStorageExistsForOrigin(kOrigin1));
  EXPECT_FALSE(tester.DOMStorageExistsForOrigin(kOrigin2));
  EXPECT_FALSE(tester.DOMStorageExistsForOrigin(kOrigin3));
}

TEST_F(StoragePartitionImplTest, RemoveProtectedLocalStorageForever) {
  // Protect kOrigin1.
  scoped_refptr<MockSpecialStoragePolicy> mock_policy =
      new MockSpecialStoragePolicy;
  mock_policy->AddProtected(kOrigin1.GetOrigin());

  RemoveLocalStorageTester tester(browser_context());

  tester.AddDOMStorageTestData();
  EXPECT_TRUE(tester.DOMStorageExistsForOrigin(kOrigin1));
  EXPECT_TRUE(tester.DOMStorageExistsForOrigin(kOrigin2));
  EXPECT_TRUE(tester.DOMStorageExistsForOrigin(kOrigin3));

  StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>(
      BrowserContext::GetDefaultStoragePartition(browser_context()));
  partition->OverrideSpecialStoragePolicyForTesting(mock_policy.get());

  base::RunLoop run_loop;
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::BindOnce(
          &ClearStuff, StoragePartitionImpl::REMOVE_DATA_MASK_LOCAL_STORAGE,
          partition, base::Time(), base::Time::Max(),
          base::Bind(&DoesOriginMatchForBothProtectedAndUnprotectedWeb),
          &run_loop));
  run_loop.Run();
  // ClearData only guarantees that tasks to delete data are scheduled when
  // its callback is invoked. It doesn't guarantee data has actually been
  // cleared. So run all scheduled tasks to make sure data is cleared.
  base::RunLoop().RunUntilIdle();

  // Even if kOrigin1 is protected, it will be deleted since we specify
  // ClearData to delete protected data.
  EXPECT_FALSE(tester.DOMStorageExistsForOrigin(kOrigin1));
  EXPECT_FALSE(tester.DOMStorageExistsForOrigin(kOrigin2));
  EXPECT_FALSE(tester.DOMStorageExistsForOrigin(kOrigin3));
}

TEST_F(StoragePartitionImplTest, RemoveLocalStorageForLastWeek) {
  RemoveLocalStorageTester tester(browser_context());

  tester.AddDOMStorageTestData();
  EXPECT_TRUE(tester.DOMStorageExistsForOrigin(kOrigin1));
  EXPECT_TRUE(tester.DOMStorageExistsForOrigin(kOrigin2));
  EXPECT_TRUE(tester.DOMStorageExistsForOrigin(kOrigin3));

  StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>(
      BrowserContext::GetDefaultStoragePartition(browser_context()));
  base::Time a_week_ago = base::Time::Now() - base::TimeDelta::FromDays(7);

  base::RunLoop run_loop;
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::BindOnce(
          &ClearStuff, StoragePartitionImpl::REMOVE_DATA_MASK_LOCAL_STORAGE,
          partition, a_week_ago, base::Time::Max(),
          base::Bind(&DoesOriginMatchForBothProtectedAndUnprotectedWeb),
          &run_loop));
  run_loop.Run();
  // ClearData only guarantees that tasks to delete data are scheduled when
  // its callback is invoked. It doesn't guarantee data has actually been
  // cleared. So run all scheduled tasks to make sure data is cleared.
  base::RunLoop().RunUntilIdle();

  // kOrigin1 and kOrigin2 were modified within the last week, so they are
  // removed; kOrigin3 was last modified 60 days ago and survives.
  EXPECT_FALSE(tester.DOMStorageExistsForOrigin(kOrigin1));
  EXPECT_FALSE(tester.DOMStorageExistsForOrigin(kOrigin2));
  EXPECT_TRUE(tester.DOMStorageExistsForOrigin(kOrigin3));
}

#if BUILDFLAG(ENABLE_PLUGINS)
TEST_F(StoragePartitionImplTest, RemovePluginPrivateDataForever) {
  StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>(
      BrowserContext::GetDefaultStoragePartition(browser_context()));

  RemovePluginPrivateDataTester tester(partition->GetFileSystemContext());
  tester.AddPluginPrivateTestData();
  EXPECT_TRUE(tester.DataExistsForOrigin(kOrigin1));
  EXPECT_TRUE(tester.DataExistsForOrigin(kOrigin2));

  base::RunLoop run_loop;
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::BindOnce(&ClearPluginPrivateData, partition, GURL(),
                     base::Time(), base::Time::Max(), &run_loop));
  run_loop.Run();

  EXPECT_FALSE(tester.DataExistsForOrigin(kOrigin1));
  EXPECT_FALSE(tester.DataExistsForOrigin(kOrigin2));
}

TEST_F(StoragePartitionImplTest, RemovePluginPrivateDataLastWeek) {
  StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>(
      BrowserContext::GetDefaultStoragePartition(browser_context()));
  base::Time a_week_ago = base::Time::Now() - base::TimeDelta::FromDays(7);

  RemovePluginPrivateDataTester tester(partition->GetFileSystemContext());
  tester.AddPluginPrivateTestData();
  EXPECT_TRUE(tester.DataExistsForOrigin(kOrigin1));
  EXPECT_TRUE(tester.DataExistsForOrigin(kOrigin2));

  base::RunLoop run_loop;
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::BindOnce(&ClearPluginPrivateData, partition, GURL(), a_week_ago,
                     base::Time::Max(), &run_loop));
  run_loop.Run();

  // Origin1 has 1 file from 10 days ago, so it should remain around.
  // Origin2 has a current file, so it should be removed (even though the
  // second file is much older).
EXPECT_TRUE(tester.DataExistsForOrigin(kOrigin1)); EXPECT_FALSE(tester.DataExistsForOrigin(kOrigin2)); } TEST_F(StoragePartitionImplTest, RemovePluginPrivateDataForOrigin) { StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>( BrowserContext::GetDefaultStoragePartition(browser_context())); RemovePluginPrivateDataTester tester(partition->GetFileSystemContext()); tester.AddPluginPrivateTestData(); EXPECT_TRUE(tester.DataExistsForOrigin(kOrigin1)); EXPECT_TRUE(tester.DataExistsForOrigin(kOrigin2)); base::RunLoop run_loop; base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&ClearPluginPrivateData, partition, kOrigin1, base::Time(), base::Time::Max(), &run_loop)); run_loop.Run(); // Only Origin1 should be deleted. EXPECT_FALSE(tester.DataExistsForOrigin(kOrigin1)); EXPECT_TRUE(tester.DataExistsForOrigin(kOrigin2)); } TEST_F(StoragePartitionImplTest, RemovePluginPrivateDataWhileWriting) { StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>( BrowserContext::GetDefaultStoragePartition(browser_context())); RemovePluginPrivateDataTester tester(partition->GetFileSystemContext()); tester.AddPluginPrivateTestData(); EXPECT_TRUE(tester.DataExistsForOrigin(kOrigin1)); EXPECT_TRUE(tester.DataExistsForOrigin(kOrigin2)); const char test_data[] = {0, 1, 2, 3, 4, 5}; base::File file = tester.OpenClearKeyFileForWrite(); EXPECT_TRUE(file.IsValid()); EXPECT_EQ(static_cast<int>(arraysize(test_data)), file.Write(0, test_data, arraysize(test_data))); base::RunLoop run_loop; base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&ClearPluginPrivateData, partition, GURL(), base::Time(), base::Time::Max(), &run_loop)); run_loop.Run(); EXPECT_FALSE(tester.DataExistsForOrigin(kOrigin1)); EXPECT_FALSE(tester.DataExistsForOrigin(kOrigin2)); const char more_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; EXPECT_EQ(static_cast<int>(arraysize(more_data)), file.WriteAtCurrentPos(more_data, arraysize(more_data))); base::File file2 = tester.OpenClearKeyFileForWrite(); EXPECT_FALSE(file2.IsValid()); } TEST_F(StoragePartitionImplTest, RemovePluginPrivateDataAfterDeletion) { StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>( BrowserContext::GetDefaultStoragePartition(browser_context())); RemovePluginPrivateDataTester tester(partition->GetFileSystemContext()); tester.AddPluginPrivateTestData(); EXPECT_TRUE(tester.DataExistsForOrigin(kOrigin1)); EXPECT_TRUE(tester.DataExistsForOrigin(kOrigin2)); // Delete the single file saved for |kOrigin1|. This does not remove the // origin from the list of Origins. However, ClearPluginPrivateData() will // remove it. 
tester.DeleteClearKeyTestData(); EXPECT_TRUE(tester.DataExistsForOrigin(kOrigin1)); EXPECT_TRUE(tester.DataExistsForOrigin(kOrigin2)); base::RunLoop run_loop; base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&ClearPluginPrivateData, partition, GURL(), base::Time(), base::Time::Max(), &run_loop)); run_loop.Run(); EXPECT_FALSE(tester.DataExistsForOrigin(kOrigin1)); EXPECT_FALSE(tester.DataExistsForOrigin(kOrigin2)); } #endif // BUILDFLAG(ENABLE_PLUGINS) TEST(StoragePartitionImplStaticTest, CreatePredicateForHostCookies) { GURL url("http://www.example.com/"); GURL url2("https://www.example.com/"); GURL url3("https://www.google.com/"); net::CookieOptions options; net::CookieStore::CookiePredicate predicate = StoragePartitionImpl::CreatePredicateForHostCookies(url); base::Time now = base::Time::Now(); std::vector<std::unique_ptr<CanonicalCookie>> valid_cookies; valid_cookies.push_back(CanonicalCookie::Create(url, "A=B", now, options)); valid_cookies.push_back(CanonicalCookie::Create(url, "C=F", now, options)); // We should match a different scheme with the same host. valid_cookies.push_back(CanonicalCookie::Create(url2, "A=B", now, options)); std::vector<std::unique_ptr<CanonicalCookie>> invalid_cookies; // We don't match domain cookies. invalid_cookies.push_back( CanonicalCookie::Create(url2, "A=B;domain=.example.com", now, options)); invalid_cookies.push_back(CanonicalCookie::Create(url3, "A=B", now, options)); for (const auto& cookie : valid_cookies) EXPECT_TRUE(predicate.Run(*cookie)) << cookie->DebugString(); for (const auto& cookie : invalid_cookies) EXPECT_FALSE(predicate.Run(*cookie)) << cookie->DebugString(); } } // namespace content
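// A minimal sketch of how the bitmask API above composes (illustrative only,
// hence commented out; it would live alongside the tests above and reuses
// only helpers defined in this file). Since ClearStuff() forwards an
// arbitrary remove mask to ClearData(), a single call can cover several
// storage backends at once:
//
// TEST_F(StoragePartitionImplTest, SketchClearSeveralBackendsAtOnce) {
//   StoragePartitionImpl* partition = static_cast<StoragePartitionImpl*>(
//       BrowserContext::GetDefaultStoragePartition(browser_context()));
//   base::RunLoop run_loop;
//   base::ThreadTaskRunnerHandle::Get()->PostTask(
//       FROM_HERE,
//       base::BindOnce(
//           &ClearStuff,
//           StoragePartition::REMOVE_DATA_MASK_COOKIES |
//               StoragePartitionImpl::REMOVE_DATA_MASK_LOCAL_STORAGE,
//           partition, base::Time(), base::Time::Max(),
//           base::Bind(&DoesOriginMatchForBothProtectedAndUnprotectedWeb),
//           &run_loop));
//   run_loop.Run();
// }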
null
null
null
null
19,207
65,130
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
65,130
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_UI_ZOOM_CHROME_ZOOM_LEVEL_PREFS_H_ #define CHROME_BROWSER_UI_ZOOM_CHROME_ZOOM_LEVEL_PREFS_H_ #include "base/callback.h" #include "base/files/file_path.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "base/memory/weak_ptr.h" #include "base/observer_list.h" #include "components/prefs/json_pref_store.h" #include "components/prefs/pref_change_registrar.h" #include "components/prefs/pref_service.h" #include "components/prefs/pref_store.h" #include "content/public/browser/host_zoom_map.h" #include "content/public/browser/zoom_level_delegate.h" namespace base { class DictionaryValue; } namespace zoom { class ZoomEventManager; } // A class to manage per-partition default and per-host zoom levels in Chrome's // preference system. It implements an interface between the content/ zoom // levels in HostZoomMap and Chrome's preference system. All changes // to the per-partition default zoom levels from chrome/ flow through this // class. Any changes to per-host levels are updated when HostZoomMap calls // OnZoomLevelChanged. class ChromeZoomLevelPrefs : public content::ZoomLevelDelegate { public: typedef base::CallbackList<void(void)>::Subscription DefaultZoomLevelSubscription; // Initialize the pref_service and the partition_key via the constructor, // as these concepts won't be available in the content base class // ZoomLevelDelegate, which will define the InitHostZoomMap interface. // |pref_service_| must outlive this class. ChromeZoomLevelPrefs( PrefService* pref_service, const base::FilePath& profile_path, const base::FilePath& partition_path, base::WeakPtr<zoom::ZoomEventManager> zoom_event_manager); ~ChromeZoomLevelPrefs() override; static std::string GetPartitionKeyForTesting( const base::FilePath& relative_path); void SetDefaultZoomLevelPref(double level); double GetDefaultZoomLevelPref() const; std::unique_ptr<DefaultZoomLevelSubscription> RegisterDefaultZoomLevelCallback(const base::Closure& callback); void ExtractPerHostZoomLevels( const base::DictionaryValue* host_zoom_dictionary, bool sanitize_partition_host_zoom_levels); // content::ZoomLevelDelegate void InitHostZoomMap(content::HostZoomMap* host_zoom_map) override; private: // This is a callback function that receives notifications from HostZoomMap // when per-host zoom levels change. It is used to update the per-host // zoom levels (if any) managed by this class (for its associated partition). void OnZoomLevelChanged(const content::HostZoomMap::ZoomLevelChange& change); // |partition_key_| used to be a hash as returned by // std::hash<std::string>. These functions check for zoom settings // under the old key and copies them to the new one only if there is // not already a new setting. // TODO(thomasanderson): Remove these after Chrome M65 has reached // stable. void MigrateOldZoomPreferences(const base::FilePath& partition_relative_path); void MigrateOldZoomPreferencesForKeys(const std::string& old_key, const std::string& new_key); PrefService* pref_service_; base::WeakPtr<zoom::ZoomEventManager> zoom_event_manager_; content::HostZoomMap* host_zoom_map_; std::unique_ptr<content::HostZoomMap::Subscription> zoom_subscription_; std::string partition_key_; base::CallbackList<void(void)> default_zoom_changed_callbacks_; DISALLOW_COPY_AND_ASSIGN(ChromeZoomLevelPrefs); }; #endif // CHROME_BROWSER_UI_ZOOM_CHROME_ZOOM_LEVEL_PREFS_H_
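// Usage sketch (illustrative only; |pref_service|, |profile_path|,
// |partition_path|, |zoom_event_manager| and the callback are placeholders,
// not names from this header). A profile-scoped caller would create one
// instance per storage partition and let content/ drive it through
// InitHostZoomMap():
//
//   auto zoom_level_prefs = std::make_unique<ChromeZoomLevelPrefs>(
//       pref_service, profile_path, partition_path, zoom_event_manager);
//   auto subscription = zoom_level_prefs->RegisterDefaultZoomLevelCallback(
//       base::Bind(&OnDefaultZoomChanged));  // hypothetical callback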
null
null
null
null
61,993
47,107
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
47,107
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CC_TEST_TEST_LAYER_TREE_HOST_BASE_H_ #define CC_TEST_TEST_LAYER_TREE_HOST_BASE_H_ #include <memory> #include "cc/test/fake_impl_task_runner_provider.h" #include "cc/test/fake_layer_tree_host_impl.h" #include "cc/test/fake_picture_layer_impl.h" #include "cc/test/test_task_graph_runner.h" #include "cc/tiles/tile_priority.h" #include "cc/trees/layer_tree_frame_sink.h" #include "testing/gtest/include/gtest/gtest.h" #include "ui/gfx/geometry/size.h" namespace cc { class TestLayerTreeHostBase : public testing::Test { protected: TestLayerTreeHostBase(); ~TestLayerTreeHostBase() override; void SetUp() override; virtual LayerTreeSettings CreateSettings(); virtual std::unique_ptr<LayerTreeFrameSink> CreateLayerTreeFrameSink(); virtual std::unique_ptr<FakeLayerTreeHostImpl> CreateHostImpl( const LayerTreeSettings& settings, TaskRunnerProvider* task_runner_provider, TaskGraphRunner* task_graph_runner); virtual std::unique_ptr<TaskGraphRunner> CreateTaskGraphRunner(); virtual void InitializeRenderer(); void ResetLayerTreeFrameSink( std::unique_ptr<LayerTreeFrameSink> layer_tree_frame_sink); std::unique_ptr<FakeLayerTreeHostImpl> TakeHostImpl(); void SetupDefaultTrees(const gfx::Size& layer_bounds); void SetupTrees(scoped_refptr<RasterSource> pending_raster_source, scoped_refptr<RasterSource> active_raster_source); void SetupPendingTree(scoped_refptr<RasterSource> raster_source); void SetupPendingTree( scoped_refptr<RasterSource> raster_source, const gfx::Size& tile_size, const Region& invalidation, Layer::LayerMaskType mask_type = Layer::LayerMaskType::NOT_MASK); void ActivateTree(); void PerformImplSideInvalidation(); void RebuildPropertyTreesOnPendingTree(); FakeLayerTreeHostImpl* host_impl() const { return host_impl_.get(); } TaskGraphRunner* task_graph_runner() const { return task_graph_runner_.get(); } LayerTreeFrameSink* layer_tree_frame_sink() const { return layer_tree_frame_sink_.get(); } FakePictureLayerImpl* pending_layer() const { return pending_layer_; } FakePictureLayerImpl* active_layer() const { return active_layer_; } FakePictureLayerImpl* old_pending_layer() const { return old_pending_layer_; } int layer_id() const { return id_; } private: void SetInitialTreePriority(); FakeImplTaskRunnerProvider task_runner_provider_; std::unique_ptr<TaskGraphRunner> task_graph_runner_; std::unique_ptr<LayerTreeFrameSink> layer_tree_frame_sink_; std::unique_ptr<FakeLayerTreeHostImpl> host_impl_; FakePictureLayerImpl* pending_layer_; FakePictureLayerImpl* active_layer_; FakePictureLayerImpl* old_pending_layer_; const int root_id_; const int id_; }; } // namespace cc #endif // CC_TEST_TEST_LAYER_TREE_HOST_BASE_H_
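// Usage sketch (illustrative; |MyTileTest| is a placeholder, and the exact
// tree state after each helper depends on the implementations in the .cc
// file): a raster/tiling test typically derives from this base and drives
// the pending/active tree lifecycle through the protected helpers.
//
//   class MyTileTest : public TestLayerTreeHostBase {};
//
//   TEST_F(MyTileTest, Sketch) {
//     SetupDefaultTrees(gfx::Size(1000, 1000));
//     // ... exercise host_impl(), pending_layer(), active_layer() ...
//   }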
null
null
null
null
43,970
34,560
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
34,560
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/* * Copyright (C) 2012 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "third_party/blink/renderer/core/html/html_dialog_element.h" #include "third_party/blink/renderer/bindings/core/v8/exception_state.h" #include "third_party/blink/renderer/core/dom/ax_object_cache.h" #include "third_party/blink/renderer/core/dom/events/event.h" #include "third_party/blink/renderer/core/dom/exception_code.h" #include "third_party/blink/renderer/core/dom/flat_tree_traversal.h" #include "third_party/blink/renderer/core/frame/local_frame_view.h" #include "third_party/blink/renderer/core/frame/use_counter.h" #include "third_party/blink/renderer/core/fullscreen/fullscreen.h" #include "third_party/blink/renderer/core/html/forms/html_form_control_element.h" #include "third_party/blink/renderer/core/html/html_frame_owner_element.h" namespace blink { using namespace HTMLNames; // This function chooses the focused element when show() or showModal() is // invoked, as described in their spec. static void SetFocusForDialog(HTMLDialogElement* dialog) { Element* focusable_descendant = nullptr; Node* next = nullptr; // TODO(kochi): How to find focusable element inside Shadow DOM is not // currently specified. This may change at any time. // See crbug/383230 and https://github.com/whatwg/html/issues/2393 . for (Node* node = FlatTreeTraversal::FirstChild(*dialog); node; node = next) { next = IsHTMLDialogElement(*node) ? FlatTreeTraversal::NextSkippingChildren(*node, dialog) : FlatTreeTraversal::Next(*node, dialog); if (!node->IsElementNode()) continue; Element* element = ToElement(node); if (element->IsFormControlElement()) { HTMLFormControlElement* control = ToHTMLFormControlElement(node); if (control->IsAutofocusable() && control->IsFocusable()) { control->focus(); return; } } if (!focusable_descendant && element->IsFocusable()) focusable_descendant = element; } if (focusable_descendant) { focusable_descendant->focus(); return; } if (dialog->IsFocusable()) { dialog->focus(); return; } dialog->GetDocument().ClearFocusedElement(); } static void InertSubtreesChanged(Document& document) { if (document.GetFrame()) { // SetIsInert recurses through subframes to propagate the inert bit as // needed. 
document.GetFrame()->SetIsInert(document.LocalOwner() && document.LocalOwner()->IsInert()); } // When a modal dialog opens or closes, nodes all over the accessibility // tree can change inertness which means they must be added or removed from // the tree. The most foolproof way is to clear the entire tree and rebuild // it, though a more clever way is probably possible. document.ClearAXObjectCache(); } inline HTMLDialogElement::HTMLDialogElement(Document& document) : HTMLElement(dialogTag, document), centering_mode_(kNotCentered), centered_position_(0), return_value_("") { UseCounter::Count(document, WebFeature::kDialogElement); } DEFINE_NODE_FACTORY(HTMLDialogElement) void HTMLDialogElement::close(const String& return_value) { // https://html.spec.whatwg.org/#close-the-dialog if (!FastHasAttribute(openAttr)) return; SetBooleanAttribute(openAttr, false); HTMLDialogElement* active_modal_dialog = GetDocument().ActiveModalDialog(); GetDocument().RemoveFromTopLayer(this); if (active_modal_dialog == this) InertSubtreesChanged(GetDocument()); if (!return_value.IsNull()) return_value_ = return_value; ScheduleCloseEvent(); } void HTMLDialogElement::ForceLayoutForCentering() { centering_mode_ = kNeedsCentering; GetDocument().UpdateStyleAndLayoutIgnorePendingStylesheets(); if (centering_mode_ == kNeedsCentering) SetNotCentered(); } void HTMLDialogElement::ScheduleCloseEvent() { Event* event = Event::Create(EventTypeNames::close); event->SetTarget(this); GetDocument().EnqueueAnimationFrameEvent(event); } void HTMLDialogElement::show() { if (FastHasAttribute(openAttr)) return; SetBooleanAttribute(openAttr, true); // The layout must be updated here because setFocusForDialog calls // Element::isFocusable, which requires an up-to-date layout. GetDocument().UpdateStyleAndLayoutIgnorePendingStylesheets(); SetFocusForDialog(this); } void HTMLDialogElement::showModal(ExceptionState& exception_state) { if (FastHasAttribute(openAttr)) { exception_state.ThrowDOMException(kInvalidStateError, "The element already has an 'open' " "attribute, and therefore cannot be " "opened modally."); return; } if (!isConnected()) { exception_state.ThrowDOMException(kInvalidStateError, "The element is not in a Document."); return; } // See comment in |Fullscreen::RequestFullscreen|. if (Fullscreen::IsInFullscreenElementStack(*this)) { UseCounter::Count(GetDocument(), WebFeature::kShowModalForElementInFullscreenStack); } GetDocument().AddToTopLayer(this); SetBooleanAttribute(openAttr, true); // Throw away the AX cache first, so the subsequent steps don't have a chance // of queuing up AX events on objects that would be invalidated when the cache // is thrown away. InertSubtreesChanged(GetDocument()); ForceLayoutForCentering(); SetFocusForDialog(this); } void HTMLDialogElement::RemovedFrom(ContainerNode* insertion_point) { HTMLElement::RemovedFrom(insertion_point); SetNotCentered(); InertSubtreesChanged(GetDocument()); } void HTMLDialogElement::SetCentered(LayoutUnit centered_position) { DCHECK_EQ(centering_mode_, kNeedsCentering); centered_position_ = centered_position; centering_mode_ = kCentered; } void HTMLDialogElement::SetNotCentered() { centering_mode_ = kNotCentered; } bool HTMLDialogElement::IsPresentationAttribute( const QualifiedName& name) const { // FIXME: Workaround for <https://bugs.webkit.org/show_bug.cgi?id=91058>: // modifying an attribute for which there is an attribute selector in html.css // sometimes does not trigger a style recalc. 
if (name == openAttr) return true; return HTMLElement::IsPresentationAttribute(name); } void HTMLDialogElement::DefaultEventHandler(Event* event) { if (event->type() == EventTypeNames::cancel) { close(); event->SetDefaultHandled(); return; } HTMLElement::DefaultEventHandler(event); } } // namespace blink
null
null
null
null
31,423
31,916
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
31,916
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "third_party/blink/renderer/core/css/properties/longhands/object_position.h" #include "third_party/blink/renderer/core/css/css_value_pair.h" #include "third_party/blink/renderer/core/css/parser/css_property_parser_helpers.h" #include "third_party/blink/renderer/core/css/properties/computed_style_utils.h" #include "third_party/blink/renderer/core/frame/web_feature.h" #include "third_party/blink/renderer/core/style/computed_style.h" namespace blink { namespace CSSLonghand { const CSSValue* ObjectPosition::ParseSingleValue( CSSParserTokenRange& range, const CSSParserContext& context, const CSSParserLocalContext&) const { return ConsumePosition(range, context, CSSPropertyParserHelpers::UnitlessQuirk::kForbid, WebFeature::kThreeValuedPositionObjectPosition); } const CSSValue* ObjectPosition::CSSValueFromComputedStyleInternal( const ComputedStyle& style, const SVGComputedStyle&, const LayoutObject*, Node* styled_node, bool allow_visited_style) const { return CSSValuePair::Create( ComputedStyleUtils::ZoomAdjustedPixelValueForLength( style.ObjectPosition().X(), style), ComputedStyleUtils::ZoomAdjustedPixelValueForLength( style.ObjectPosition().Y(), style), CSSValuePair::kKeepIdenticalValues); } } // namespace CSSLonghand } // namespace blink
null
null
null
null
28,779
64,139
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
64,139
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/ui/webui/media_router/media_router_web_ui_test.h" #include "chrome/browser/media/router/media_router_factory.h" #include "chrome/browser/media/router/test/mock_media_router.h" #include "chrome/browser/ui/toolbar/mock_media_router_action_controller.h" #include "chrome/browser/ui/toolbar/toolbar_actions_model.h" #include "chrome/browser/ui/toolbar/toolbar_actions_model_factory.h" #include "chrome/browser/ui/webui/media_router/media_router_ui_service.h" #include "chrome/browser/ui/webui/media_router/media_router_ui_service_factory.h" #include "chrome/test/base/dialog_test_browser_window.h" class MockMediaRouterUIService : public media_router::MediaRouterUIService { public: explicit MockMediaRouterUIService(Profile* profile) : media_router::MediaRouterUIService(profile), action_controller_(profile) {} ~MockMediaRouterUIService() override {} MediaRouterActionController* action_controller() override { return &action_controller_; } private: MockMediaRouterActionController action_controller_; }; std::unique_ptr<KeyedService> BuildMockMediaRouterUIService( content::BrowserContext* context) { return std::make_unique<MockMediaRouterUIService>( static_cast<Profile*>(context)); } std::unique_ptr<KeyedService> BuildToolbarActionsModel( content::BrowserContext* context) { return std::make_unique<ToolbarActionsModel>(static_cast<Profile*>(context), nullptr); } MediaRouterWebUITest::MediaRouterWebUITest() : MediaRouterWebUITest(false) {} MediaRouterWebUITest::MediaRouterWebUITest(bool require_mock_ui_service) : require_mock_ui_service_(require_mock_ui_service) {} MediaRouterWebUITest::~MediaRouterWebUITest() {} TestingProfile::TestingFactories MediaRouterWebUITest::GetTestingFactories() { TestingProfile::TestingFactories factories = { {media_router::MediaRouterFactory::GetInstance(), &media_router::MockMediaRouter::Create}}; if (require_mock_ui_service_) { factories.emplace_back( media_router::MediaRouterUIServiceFactory::GetInstance(), BuildMockMediaRouterUIService); factories.emplace_back(ToolbarActionsModelFactory::GetInstance(), BuildToolbarActionsModel); } return factories; } BrowserWindow* MediaRouterWebUITest::CreateBrowserWindow() { return new DialogTestBrowserWindow; }
null
null
null
null
61,002
20,336
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
185,331
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef FS_ENET_H #define FS_ENET_H #include <linux/mii.h> #include <linux/netdevice.h> #include <linux/types.h> #include <linux/list.h> #include <linux/phy.h> #include <linux/dma-mapping.h> #include <linux/fs_enet_pd.h> #include <asm/fs_pd.h> #ifdef CONFIG_CPM1 #include <asm/cpm1.h> #endif #if defined(CONFIG_FS_ENET_HAS_FEC) #include <asm/cpm.h> #if defined(CONFIG_FS_ENET_MPC5121_FEC) /* MPC5121 FEC has different register layout */ struct fec { u32 fec_reserved0; u32 fec_ievent; /* Interrupt event reg */ u32 fec_imask; /* Interrupt mask reg */ u32 fec_reserved1; u32 fec_r_des_active; /* Receive descriptor reg */ u32 fec_x_des_active; /* Transmit descriptor reg */ u32 fec_reserved2[3]; u32 fec_ecntrl; /* Ethernet control reg */ u32 fec_reserved3[6]; u32 fec_mii_data; /* MII manage frame reg */ u32 fec_mii_speed; /* MII speed control reg */ u32 fec_reserved4[7]; u32 fec_mib_ctrlstat; /* MIB control/status reg */ u32 fec_reserved5[7]; u32 fec_r_cntrl; /* Receive control reg */ u32 fec_reserved6[15]; u32 fec_x_cntrl; /* Transmit Control reg */ u32 fec_reserved7[7]; u32 fec_addr_low; /* Low 32bits MAC address */ u32 fec_addr_high; /* High 16bits MAC address */ u32 fec_opd; /* Opcode + Pause duration */ u32 fec_reserved8[10]; u32 fec_hash_table_high; /* High 32bits hash table */ u32 fec_hash_table_low; /* Low 32bits hash table */ u32 fec_grp_hash_table_high; /* High 32bits hash table */ u32 fec_grp_hash_table_low; /* Low 32bits hash table */ u32 fec_reserved9[7]; u32 fec_x_wmrk; /* FIFO transmit water mark */ u32 fec_reserved10; u32 fec_r_bound; /* FIFO receive bound reg */ u32 fec_r_fstart; /* FIFO receive start reg */ u32 fec_reserved11[11]; u32 fec_r_des_start; /* Receive descriptor ring */ u32 fec_x_des_start; /* Transmit descriptor ring */ u32 fec_r_buff_size; /* Maximum receive buff size */ u32 fec_reserved12[26]; u32 fec_dma_control; /* DMA Endian and other ctrl */ }; #endif struct fec_info { struct fec __iomem *fecp; u32 mii_speed; }; #endif #ifdef CONFIG_CPM2 #include <asm/cpm2.h> #endif /* hw driver ops */ struct fs_ops { int (*setup_data)(struct net_device *dev); int (*allocate_bd)(struct net_device *dev); void (*free_bd)(struct net_device *dev); void (*cleanup_data)(struct net_device *dev); void (*set_multicast_list)(struct net_device *dev); void (*adjust_link)(struct net_device *dev); void (*restart)(struct net_device *dev); void (*stop)(struct net_device *dev); void (*napi_clear_event)(struct net_device *dev); void (*napi_enable)(struct net_device *dev); void (*napi_disable)(struct net_device *dev); void (*rx_bd_done)(struct net_device *dev); void (*tx_kickstart)(struct net_device *dev); u32 (*get_int_events)(struct net_device *dev); void (*clear_int_events)(struct net_device *dev, u32 int_events); void (*ev_error)(struct net_device *dev, u32 int_events); int (*get_regs)(struct net_device *dev, void *p, int *sizep); int (*get_regs_len)(struct net_device *dev); void (*tx_restart)(struct net_device *dev); }; struct phy_info { unsigned int id; const char *name; void (*startup) (struct net_device * dev); void (*shutdown) (struct net_device * dev); void (*ack_int) (struct net_device * dev); }; /* The FEC stores dest/src/type, data, and checksum for receive packets. 
*/ #define MAX_MTU 1508 /* Allow full-sized pppoe packets over VLAN */ #define MIN_MTU 46 /* this is data size */ #define CRC_LEN 4 #define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN) #define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN) /* Must be a multiple of 32 (to cover both FEC & FCC) */ #define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE + 31) & ~31) /* This is needed so that invalidate_xxx won't invalidate too much */ #define ENET_RX_ALIGN 16 #define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE + ENET_RX_ALIGN - 1) struct fs_enet_private { struct napi_struct napi; struct device *dev; /* pointer back to the device (must be initialized first) */ struct net_device *ndev; spinlock_t lock; /* during all ops except TX packet processing */ spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */ struct fs_platform_info *fpi; const struct fs_ops *ops; int rx_ring, tx_ring; dma_addr_t ring_mem_addr; void __iomem *ring_base; struct sk_buff **rx_skbuff; struct sk_buff **tx_skbuff; char *mapped_as_page; cbd_t __iomem *rx_bd_base; /* Address of Rx and Tx buffers. */ cbd_t __iomem *tx_bd_base; cbd_t __iomem *dirty_tx; /* ring entries to be free()ed. */ cbd_t __iomem *cur_rx; cbd_t __iomem *cur_tx; int tx_free; struct timer_list phy_timer_list; const struct phy_info *phy; u32 msg_enable; struct mii_if_info mii_if; unsigned int last_mii_status; int interrupt; int oldduplex, oldspeed, oldlink; /* current settings */ /* event masks */ u32 ev_napi; /* mask of NAPI events */ u32 ev; /* event mask */ u32 ev_err; /* error event mask */ u16 bd_rx_empty; /* mask of BD rx empty */ u16 bd_rx_err; /* mask of BD rx errors */ union { struct { int idx; /* FEC1 = 0, FEC2 = 1 */ void __iomem *fecp; /* hw registers */ u32 hthi, htlo; /* state for multicast */ } fec; struct { int idx; /* FCC1-3 = 0-2 */ void __iomem *fccp; /* hw registers */ void __iomem *ep; /* parameter ram */ void __iomem *fcccp; /* hw registers cont. 
*/ void __iomem *mem; /* FCC DPRAM */ u32 gaddrh, gaddrl; /* group address */ } fcc; struct { int idx; /* FEC1 = 0, FEC2 = 1 */ void __iomem *sccp; /* hw registers */ void __iomem *ep; /* parameter ram */ u32 hthi, htlo; /* state for multicast */ } scc; }; }; /***************************************************************************/ void fs_init_bds(struct net_device *dev); void fs_cleanup_bds(struct net_device *dev); /***************************************************************************/ #define DRV_MODULE_NAME "fs_enet" #define PFX DRV_MODULE_NAME ": " #define DRV_MODULE_VERSION "1.1" #define DRV_MODULE_RELDATE "Sep 22, 2014" /***************************************************************************/ int fs_enet_platform_init(void); void fs_enet_platform_cleanup(void); /***************************************************************************/ /* buffer descriptor access macros */ /* access macros */ #if defined(CONFIG_CPM1) /* for a CPM1 __raw_xxx's are sufficient */ #define __cbd_out32(addr, x) __raw_writel(x, addr) #define __cbd_out16(addr, x) __raw_writew(x, addr) #define __cbd_in32(addr) __raw_readl(addr) #define __cbd_in16(addr) __raw_readw(addr) #else /* for others play it safe */ #define __cbd_out32(addr, x) out_be32(addr, x) #define __cbd_out16(addr, x) out_be16(addr, x) #define __cbd_in32(addr) in_be32(addr) #define __cbd_in16(addr) in_be16(addr) #endif /* write */ #define CBDW_SC(_cbd, _sc) __cbd_out16(&(_cbd)->cbd_sc, (_sc)) #define CBDW_DATLEN(_cbd, _datlen) __cbd_out16(&(_cbd)->cbd_datlen, (_datlen)) #define CBDW_BUFADDR(_cbd, _bufaddr) __cbd_out32(&(_cbd)->cbd_bufaddr, (_bufaddr)) /* read */ #define CBDR_SC(_cbd) __cbd_in16(&(_cbd)->cbd_sc) #define CBDR_DATLEN(_cbd) __cbd_in16(&(_cbd)->cbd_datlen) #define CBDR_BUFADDR(_cbd) __cbd_in32(&(_cbd)->cbd_bufaddr) /* set bits */ #define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc)) /* clear bits */ #define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc)) /*******************************************************************/ extern const struct fs_ops fs_fec_ops; extern const struct fs_ops fs_fcc_ops; extern const struct fs_ops fs_scc_ops; /*******************************************************************/ #endif
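The CBDW_/CBDR_/CBDS_/CBDC_ macros in the fs_enet header above reduce every buffer-descriptor update to a read or a read-modify-write of a single descriptor word through the proper MMIO accessor. Below is a minimal host-side sketch of that idiom, compilable on its own: plain memory stands in for the __raw_*/out_be* accessors, and the BD_TX_READY flag value is an assumption for illustration, not the driver's real bit assignment.

#include <stdint.h>
#include <stdio.h>

typedef struct {                /* simplified stand-in for cbd_t */
    uint16_t cbd_sc;            /* status and control word */
    uint16_t cbd_datlen;        /* data length */
    uint32_t cbd_bufaddr;       /* buffer address */
} cbd_t;

/* write/read one descriptor word (plain memory here, MMIO in the driver) */
#define CBDW_SC(_cbd, _sc)  ((_cbd)->cbd_sc = (_sc))
#define CBDR_SC(_cbd)       ((_cbd)->cbd_sc)
/* set bits: read, OR in the mask, write back */
#define CBDS_SC(_cbd, _sc)  CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc))
/* clear bits: read, AND with the complement, write back */
#define CBDC_SC(_cbd, _sc)  CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc))

#define BD_TX_READY 0x8000      /* assumed flag value, for illustration only */

int main(void)
{
    cbd_t bd = { 0 };
    CBDS_SC(&bd, BD_TX_READY);              /* hand the descriptor to hardware */
    printf("sc after set:   0x%04x\n", CBDR_SC(&bd));
    CBDC_SC(&bd, BD_TX_READY);              /* reclaim it after completion */
    printf("sc after clear: 0x%04x\n", CBDR_SC(&bd));
    return 0;
}

Layering the set/clear helpers on the write/read pair means the CPM1 and non-CPM1 builds only have to swap the two innermost accessors.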
null
null
null
null
93,678
22,523
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
22,523
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CONTENT_BROWSER_BACKGROUND_SYNC_BACKGROUND_SYNC_SERVICE_IMPL_H_ #define CONTENT_BROWSER_BACKGROUND_SYNC_BACKGROUND_SYNC_SERVICE_IMPL_H_ #include <stdint.h> #include <memory> #include <vector> #include "base/containers/id_map.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "content/browser/background_sync/background_sync_manager.h" #include "mojo/public/cpp/bindings/binding.h" #include "third_party/blink/public/platform/modules/background_sync/background_sync.mojom.h" namespace content { class BackgroundSyncContext; class CONTENT_EXPORT BackgroundSyncServiceImpl : public blink::mojom::BackgroundSyncService { public: BackgroundSyncServiceImpl( BackgroundSyncContext* background_sync_context, mojo::InterfaceRequest<blink::mojom::BackgroundSyncService> request); ~BackgroundSyncServiceImpl() override; private: friend class BackgroundSyncServiceImplTest; // blink::mojom::BackgroundSyncService methods: void Register(blink::mojom::SyncRegistrationPtr options, int64_t sw_registration_id, RegisterCallback callback) override; void GetRegistrations(int64_t sw_registration_id, GetRegistrationsCallback callback) override; void OnRegisterResult(RegisterCallback callback, BackgroundSyncStatus status, std::unique_ptr<BackgroundSyncRegistration> result); void OnGetRegistrationsResult( GetRegistrationsCallback callback, BackgroundSyncStatus status, std::vector<std::unique_ptr<BackgroundSyncRegistration>> result); // Called when an error is detected on binding_. void OnConnectionError(); // background_sync_context_ owns this. BackgroundSyncContext* background_sync_context_; mojo::Binding<blink::mojom::BackgroundSyncService> binding_; base::WeakPtrFactory<BackgroundSyncServiceImpl> weak_ptr_factory_; DISALLOW_COPY_AND_ASSIGN(BackgroundSyncServiceImpl); }; } // namespace content #endif // CONTENT_BROWSER_BACKGROUND_SYNC_BACKGROUND_SYNC_SERVICE_IMPL_H_
null
null
null
null
19,386
24,058
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
189,053
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright © 2006-2007 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt <eric@anholt.net> */ /** * @file SDVO command definitions and structures. */ #define SDVO_OUTPUT_FIRST (0) #define SDVO_OUTPUT_TMDS0 (1 << 0) #define SDVO_OUTPUT_RGB0 (1 << 1) #define SDVO_OUTPUT_CVBS0 (1 << 2) #define SDVO_OUTPUT_SVID0 (1 << 3) #define SDVO_OUTPUT_YPRPB0 (1 << 4) #define SDVO_OUTPUT_SCART0 (1 << 5) #define SDVO_OUTPUT_LVDS0 (1 << 6) #define SDVO_OUTPUT_TMDS1 (1 << 8) #define SDVO_OUTPUT_RGB1 (1 << 9) #define SDVO_OUTPUT_CVBS1 (1 << 10) #define SDVO_OUTPUT_SVID1 (1 << 11) #define SDVO_OUTPUT_YPRPB1 (1 << 12) #define SDVO_OUTPUT_SCART1 (1 << 13) #define SDVO_OUTPUT_LVDS1 (1 << 14) #define SDVO_OUTPUT_LAST (14) struct psb_intel_sdvo_caps { u8 vendor_id; u8 device_id; u8 device_rev_id; u8 sdvo_version_major; u8 sdvo_version_minor; unsigned int sdvo_inputs_mask:2; unsigned int smooth_scaling:1; unsigned int sharp_scaling:1; unsigned int up_scaling:1; unsigned int down_scaling:1; unsigned int stall_support:1; unsigned int pad:1; u16 output_flags; } __attribute__((packed)); /** This matches the EDID DTD structure, more or less */ struct psb_intel_sdvo_dtd { struct { u16 clock; /**< pixel clock, in 10kHz units */ u8 h_active; /**< lower 8 bits (pixels) */ u8 h_blank; /**< lower 8 bits (pixels) */ u8 h_high; /**< upper 4 bits each h_active, h_blank */ u8 v_active; /**< lower 8 bits (lines) */ u8 v_blank; /**< lower 8 bits (lines) */ u8 v_high; /**< upper 4 bits each v_active, v_blank */ } part1; struct { u8 h_sync_off; /**< lower 8 bits, from hblank start */ u8 h_sync_width; /**< lower 8 bits (pixels) */ /** lower 4 bits each vsync offset, vsync width */ u8 v_sync_off_width; /** * 2 high bits of hsync offset, 2 high bits of hsync width, * bits 4-5 of vsync offset, and 2 high bits of vsync width. 
*/ u8 sync_off_width_high; u8 dtd_flags; u8 sdvo_flags; /** bits 6-7 of vsync offset at bits 6-7 */ u8 v_sync_off_high; u8 reserved; } part2; } __attribute__((packed)); struct psb_intel_sdvo_pixel_clock_range { u16 min; /**< pixel clock, in 10kHz units */ u16 max; /**< pixel clock, in 10kHz units */ } __attribute__((packed)); struct psb_intel_sdvo_preferred_input_timing_args { u16 clock; u16 width; u16 height; u8 interlace:1; u8 scaled:1; u8 pad:6; } __attribute__((packed)); /* I2C registers for SDVO */ #define SDVO_I2C_ARG_0 0x07 #define SDVO_I2C_ARG_1 0x06 #define SDVO_I2C_ARG_2 0x05 #define SDVO_I2C_ARG_3 0x04 #define SDVO_I2C_ARG_4 0x03 #define SDVO_I2C_ARG_5 0x02 #define SDVO_I2C_ARG_6 0x01 #define SDVO_I2C_ARG_7 0x00 #define SDVO_I2C_OPCODE 0x08 #define SDVO_I2C_CMD_STATUS 0x09 #define SDVO_I2C_RETURN_0 0x0a #define SDVO_I2C_RETURN_1 0x0b #define SDVO_I2C_RETURN_2 0x0c #define SDVO_I2C_RETURN_3 0x0d #define SDVO_I2C_RETURN_4 0x0e #define SDVO_I2C_RETURN_5 0x0f #define SDVO_I2C_RETURN_6 0x10 #define SDVO_I2C_RETURN_7 0x11 #define SDVO_I2C_VENDOR_BEGIN 0x20 /* Status results */ #define SDVO_CMD_STATUS_POWER_ON 0x0 #define SDVO_CMD_STATUS_SUCCESS 0x1 #define SDVO_CMD_STATUS_NOTSUPP 0x2 #define SDVO_CMD_STATUS_INVALID_ARG 0x3 #define SDVO_CMD_STATUS_PENDING 0x4 #define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5 #define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6 /* SDVO commands, argument/result registers */ #define SDVO_CMD_RESET 0x01 /** Returns a struct intel_sdvo_caps */ #define SDVO_CMD_GET_DEVICE_CAPS 0x02 #define SDVO_CMD_GET_FIRMWARE_REV 0x86 # define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0 # define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1 # define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2 /** * Reports which inputs are trained (managed to sync). * * Devices must have trained within 2 vsyncs of a mode change. */ #define SDVO_CMD_GET_TRAINED_INPUTS 0x03 struct psb_intel_sdvo_get_trained_inputs_response { unsigned int input0_trained:1; unsigned int input1_trained:1; unsigned int pad:6; } __attribute__((packed)); /** Returns a struct intel_sdvo_output_flags of active outputs. */ #define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04 /** * Sets the current set of active outputs. * * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP * on multi-output devices. */ #define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05 /** * Returns the current mapping of SDVO inputs to outputs on the device. * * Returns two struct intel_sdvo_output_flags structures. */ #define SDVO_CMD_GET_IN_OUT_MAP 0x06 struct psb_intel_sdvo_in_out_map { u16 in0, in1; }; /** * Sets the current mapping of SDVO inputs to outputs on the device. * * Takes two struct intel_sdvo_output_flags structures. */ #define SDVO_CMD_SET_IN_OUT_MAP 0x07 /** * Returns a struct intel_sdvo_output_flags of attached displays. */ #define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b /** * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging. */ #define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c /** * Takes a struct intel_sdvo_output_flags. */ #define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d /** * Returns a struct intel_sdvo_output_flags of displays with hot plug * interrupts enabled. 
*/ #define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e #define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f struct intel_sdvo_get_interrupt_event_source_response { u16 interrupt_status; unsigned int ambient_light_interrupt:1; unsigned int hdmi_audio_encrypt_change:1; unsigned int pad:6; } __attribute__((packed)); /** * Selects which input is affected by future input commands. * * Commands affected include SET_INPUT_TIMINGS_PART[12], * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12], * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS. */ #define SDVO_CMD_SET_TARGET_INPUT 0x10 struct psb_intel_sdvo_set_target_input_args { unsigned int target_1:1; unsigned int pad:7; } __attribute__((packed)); /** * Takes a struct intel_sdvo_output_flags of which outputs are targeted by * future output commands. * * Affected commands include SET_OUTPUT_TIMINGS_PART[12], * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE. */ #define SDVO_CMD_SET_TARGET_OUTPUT 0x11 #define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12 #define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13 #define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14 #define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15 #define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16 #define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17 #define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18 #define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19 /* Part 1 */ # define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0 # define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1 # define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2 # define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3 # define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4 # define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5 # define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6 # define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7 /* Part 2 */ # define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0 # define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1 # define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2 # define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3 # define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4 # define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7) # define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5) # define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3) # define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1) # define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5 # define SDVO_DTD_SDVO_FLAG_STALL (1 << 7) # define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6) # define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6) # define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4) # define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4) # define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4) # define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4) # define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6 /** * Generates a DTD based on the given width, height, and flags. * * This will be supported by any device supporting scaling or interlaced * modes. 
*/ #define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a # define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0 # define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1 # define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2 # define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3 # define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4 # define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5 # define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6 # define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0) # define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1) #define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b #define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c /** Returns a struct intel_sdvo_pixel_clock_range */ #define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d /** Returns a struct intel_sdvo_pixel_clock_range */ #define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e /** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */ #define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f /** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ #define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20 /** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ #define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21 # define SDVO_CLOCK_RATE_MULT_1X (1 << 0) # define SDVO_CLOCK_RATE_MULT_2X (1 << 1) # define SDVO_CLOCK_RATE_MULT_4X (1 << 3) #define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 /** 6 bytes of bit flags for TV formats shared by all TV format functions */ struct psb_intel_sdvo_tv_format { unsigned int ntsc_m:1; unsigned int ntsc_j:1; unsigned int ntsc_443:1; unsigned int pal_b:1; unsigned int pal_d:1; unsigned int pal_g:1; unsigned int pal_h:1; unsigned int pal_i:1; unsigned int pal_m:1; unsigned int pal_n:1; unsigned int pal_nc:1; unsigned int pal_60:1; unsigned int secam_b:1; unsigned int secam_d:1; unsigned int secam_g:1; unsigned int secam_k:1; unsigned int secam_k1:1; unsigned int secam_l:1; unsigned int secam_60:1; unsigned int hdtv_std_smpte_240m_1080i_59:1; unsigned int hdtv_std_smpte_240m_1080i_60:1; unsigned int hdtv_std_smpte_260m_1080i_59:1; unsigned int hdtv_std_smpte_260m_1080i_60:1; unsigned int hdtv_std_smpte_274m_1080i_50:1; unsigned int hdtv_std_smpte_274m_1080i_59:1; unsigned int hdtv_std_smpte_274m_1080i_60:1; unsigned int hdtv_std_smpte_274m_1080p_23:1; unsigned int hdtv_std_smpte_274m_1080p_24:1; unsigned int hdtv_std_smpte_274m_1080p_25:1; unsigned int hdtv_std_smpte_274m_1080p_29:1; unsigned int hdtv_std_smpte_274m_1080p_30:1; unsigned int hdtv_std_smpte_274m_1080p_50:1; unsigned int hdtv_std_smpte_274m_1080p_59:1; unsigned int hdtv_std_smpte_274m_1080p_60:1; unsigned int hdtv_std_smpte_295m_1080i_50:1; unsigned int hdtv_std_smpte_295m_1080p_50:1; unsigned int hdtv_std_smpte_296m_720p_59:1; unsigned int hdtv_std_smpte_296m_720p_60:1; unsigned int hdtv_std_smpte_296m_720p_50:1; unsigned int hdtv_std_smpte_293m_480p_59:1; unsigned int hdtv_std_smpte_170m_480i_59:1; unsigned int hdtv_std_iturbt601_576i_50:1; unsigned int hdtv_std_iturbt601_576p_50:1; unsigned int hdtv_std_eia_7702a_480i_60:1; unsigned int hdtv_std_eia_7702a_480p_60:1; unsigned int pad:3; } __attribute__((packed)); #define SDVO_CMD_GET_TV_FORMAT 0x28 #define SDVO_CMD_SET_TV_FORMAT 0x29 /** Returns the resolutions that can be used with the given TV format */ #define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83 struct psb_intel_sdvo_sdtv_resolution_request { unsigned int ntsc_m:1; unsigned int ntsc_j:1; unsigned int ntsc_443:1; unsigned int pal_b:1; unsigned int pal_d:1; 
unsigned int pal_g:1; unsigned int pal_h:1; unsigned int pal_i:1; unsigned int pal_m:1; unsigned int pal_n:1; unsigned int pal_nc:1; unsigned int pal_60:1; unsigned int secam_b:1; unsigned int secam_d:1; unsigned int secam_g:1; unsigned int secam_k:1; unsigned int secam_k1:1; unsigned int secam_l:1; unsigned int secam_60:1; unsigned int pad:5; } __attribute__((packed)); struct psb_intel_sdvo_sdtv_resolution_reply { unsigned int res_320x200:1; unsigned int res_320x240:1; unsigned int res_400x300:1; unsigned int res_640x350:1; unsigned int res_640x400:1; unsigned int res_640x480:1; unsigned int res_704x480:1; unsigned int res_704x576:1; unsigned int res_720x350:1; unsigned int res_720x400:1; unsigned int res_720x480:1; unsigned int res_720x540:1; unsigned int res_720x576:1; unsigned int res_768x576:1; unsigned int res_800x600:1; unsigned int res_832x624:1; unsigned int res_920x766:1; unsigned int res_1024x768:1; unsigned int res_1280x1024:1; unsigned int pad:5; } __attribute__((packed)); /* Get supported resolution with square pixel aspect ratio that can be scaled for the requested HDTV format */ #define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85 struct psb_intel_sdvo_hdtv_resolution_request { unsigned int hdtv_std_smpte_240m_1080i_59:1; unsigned int hdtv_std_smpte_240m_1080i_60:1; unsigned int hdtv_std_smpte_260m_1080i_59:1; unsigned int hdtv_std_smpte_260m_1080i_60:1; unsigned int hdtv_std_smpte_274m_1080i_50:1; unsigned int hdtv_std_smpte_274m_1080i_59:1; unsigned int hdtv_std_smpte_274m_1080i_60:1; unsigned int hdtv_std_smpte_274m_1080p_23:1; unsigned int hdtv_std_smpte_274m_1080p_24:1; unsigned int hdtv_std_smpte_274m_1080p_25:1; unsigned int hdtv_std_smpte_274m_1080p_29:1; unsigned int hdtv_std_smpte_274m_1080p_30:1; unsigned int hdtv_std_smpte_274m_1080p_50:1; unsigned int hdtv_std_smpte_274m_1080p_59:1; unsigned int hdtv_std_smpte_274m_1080p_60:1; unsigned int hdtv_std_smpte_295m_1080i_50:1; unsigned int hdtv_std_smpte_295m_1080p_50:1; unsigned int hdtv_std_smpte_296m_720p_59:1; unsigned int hdtv_std_smpte_296m_720p_60:1; unsigned int hdtv_std_smpte_296m_720p_50:1; unsigned int hdtv_std_smpte_293m_480p_59:1; unsigned int hdtv_std_smpte_170m_480i_59:1; unsigned int hdtv_std_iturbt601_576i_50:1; unsigned int hdtv_std_iturbt601_576p_50:1; unsigned int hdtv_std_eia_7702a_480i_60:1; unsigned int hdtv_std_eia_7702a_480p_60:1; unsigned int pad:6; } __attribute__((packed)); struct psb_intel_sdvo_hdtv_resolution_reply { unsigned int res_640x480:1; unsigned int res_800x600:1; unsigned int res_1024x768:1; unsigned int res_1280x960:1; unsigned int res_1400x1050:1; unsigned int res_1600x1200:1; unsigned int res_1920x1440:1; unsigned int res_2048x1536:1; unsigned int res_2560x1920:1; unsigned int res_3200x2400:1; unsigned int res_3840x2880:1; unsigned int pad1:5; unsigned int res_848x480:1; unsigned int res_1064x600:1; unsigned int res_1280x720:1; unsigned int res_1360x768:1; unsigned int res_1704x960:1; unsigned int res_1864x1050:1; unsigned int res_1920x1080:1; unsigned int res_2128x1200:1; unsigned int res_2560x1400:1; unsigned int res_2728x1536:1; unsigned int res_3408x1920:1; unsigned int res_4264x2400:1; unsigned int res_5120x2880:1; unsigned int pad2:3; unsigned int res_768x480:1; unsigned int res_960x600:1; unsigned int res_1152x720:1; unsigned int res_1124x768:1; unsigned int res_1536x960:1; unsigned int res_1680x1050:1; unsigned int res_1728x1080:1; unsigned int res_1920x1200:1; unsigned int res_2304x1440:1; unsigned int res_2456x1536:1; unsigned int res_3072x1920:1; unsigned int 
res_3840x2400:1; unsigned int res_4608x2880:1; unsigned int pad3:3; unsigned int res_1280x1024:1; unsigned int pad4:7; unsigned int res_1280x768:1; unsigned int pad5:7; } __attribute__((packed)); /* Get supported power state returns info for encoder and monitor, rely on last SetTargetInput and SetTargetOutput calls */ #define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a /* Get power state returns info for encoder and monitor, rely on last SetTargetInput and SetTargetOutput calls */ #define SDVO_CMD_GET_POWER_STATE 0x2b #define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b #define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c # define SDVO_ENCODER_STATE_ON (1 << 0) # define SDVO_ENCODER_STATE_STANDBY (1 << 1) # define SDVO_ENCODER_STATE_SUSPEND (1 << 2) # define SDVO_ENCODER_STATE_OFF (1 << 3) # define SDVO_MONITOR_STATE_ON (1 << 4) # define SDVO_MONITOR_STATE_STANDBY (1 << 5) # define SDVO_MONITOR_STATE_SUSPEND (1 << 6) # define SDVO_MONITOR_STATE_OFF (1 << 7) #define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d #define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e #define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f /** * The panel power sequencing parameters are in units of milliseconds. * The high fields are bits 8:9 of the 10-bit values. */ struct psb_sdvo_panel_power_sequencing { u8 t0; u8 t1; u8 t2; u8 t3; u8 t4; unsigned int t0_high:2; unsigned int t1_high:2; unsigned int t2_high:2; unsigned int t3_high:2; unsigned int t4_high:2; unsigned int pad:6; } __attribute__((packed)); #define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30 struct sdvo_max_backlight_reply { u8 max_value; u8 default_value; } __attribute__((packed)); #define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31 #define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32 #define SDVO_CMD_GET_AMBIENT_LIGHT 0x33 struct sdvo_get_ambient_light_reply { u16 trip_low; u16 trip_high; u16 value; } __attribute__((packed)); #define SDVO_CMD_SET_AMBIENT_LIGHT 0x34 struct sdvo_set_ambient_light_reply { u16 trip_low; u16 trip_high; unsigned int enable:1; unsigned int pad:7; } __attribute__((packed)); /* Set display power state */ #define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d # define SDVO_DISPLAY_STATE_ON (1 << 0) # define SDVO_DISPLAY_STATE_STANDBY (1 << 1) # define SDVO_DISPLAY_STATE_SUSPEND (1 << 2) # define SDVO_DISPLAY_STATE_OFF (1 << 3) #define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84 struct psb_intel_sdvo_enhancements_reply { unsigned int flicker_filter:1; unsigned int flicker_filter_adaptive:1; unsigned int flicker_filter_2d:1; unsigned int saturation:1; unsigned int hue:1; unsigned int brightness:1; unsigned int contrast:1; unsigned int overscan_h:1; unsigned int overscan_v:1; unsigned int hpos:1; unsigned int vpos:1; unsigned int sharpness:1; unsigned int dot_crawl:1; unsigned int dither:1; unsigned int tv_chroma_filter:1; unsigned int tv_luma_filter:1; } __attribute__((packed)); /* Picture enhancement limits below are dependent on the current TV format, * and thus need to be queried and set after it. 
*/ #define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d #define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b #define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52 #define SDVO_CMD_GET_MAX_SATURATION 0x55 #define SDVO_CMD_GET_MAX_HUE 0x58 #define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b #define SDVO_CMD_GET_MAX_CONTRAST 0x5e #define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61 #define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64 #define SDVO_CMD_GET_MAX_HPOS 0x67 #define SDVO_CMD_GET_MAX_VPOS 0x6a #define SDVO_CMD_GET_MAX_SHARPNESS 0x6d #define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74 #define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77 struct psb_intel_sdvo_enhancement_limits_reply { u16 max_value; u16 default_value; } __attribute__((packed)); #define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f #define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80 # define SDVO_LVDS_COLOR_DEPTH_18 (0 << 0) # define SDVO_LVDS_COLOR_DEPTH_24 (1 << 0) # define SDVO_LVDS_CONNECTOR_SPWG (0 << 2) # define SDVO_LVDS_CONNECTOR_OPENLDI (1 << 2) # define SDVO_LVDS_SINGLE_CHANNEL (0 << 4) # define SDVO_LVDS_DUAL_CHANNEL (1 << 4) #define SDVO_CMD_GET_FLICKER_FILTER 0x4e #define SDVO_CMD_SET_FLICKER_FILTER 0x4f #define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50 #define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51 #define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53 #define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54 #define SDVO_CMD_GET_SATURATION 0x56 #define SDVO_CMD_SET_SATURATION 0x57 #define SDVO_CMD_GET_HUE 0x59 #define SDVO_CMD_SET_HUE 0x5a #define SDVO_CMD_GET_BRIGHTNESS 0x5c #define SDVO_CMD_SET_BRIGHTNESS 0x5d #define SDVO_CMD_GET_CONTRAST 0x5f #define SDVO_CMD_SET_CONTRAST 0x60 #define SDVO_CMD_GET_OVERSCAN_H 0x62 #define SDVO_CMD_SET_OVERSCAN_H 0x63 #define SDVO_CMD_GET_OVERSCAN_V 0x65 #define SDVO_CMD_SET_OVERSCAN_V 0x66 #define SDVO_CMD_GET_HPOS 0x68 #define SDVO_CMD_SET_HPOS 0x69 #define SDVO_CMD_GET_VPOS 0x6b #define SDVO_CMD_SET_VPOS 0x6c #define SDVO_CMD_GET_SHARPNESS 0x6e #define SDVO_CMD_SET_SHARPNESS 0x6f #define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75 #define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76 #define SDVO_CMD_GET_TV_LUMA_FILTER 0x78 #define SDVO_CMD_SET_TV_LUMA_FILTER 0x79 struct psb_intel_sdvo_enhancements_arg { u16 value; }__attribute__((packed)); #define SDVO_CMD_GET_DOT_CRAWL 0x70 #define SDVO_CMD_SET_DOT_CRAWL 0x71 # define SDVO_DOT_CRAWL_ON (1 << 0) # define SDVO_DOT_CRAWL_DEFAULT_ON (1 << 1) #define SDVO_CMD_GET_DITHER 0x72 #define SDVO_CMD_SET_DITHER 0x73 # define SDVO_DITHER_ON (1 << 0) # define SDVO_DITHER_DEFAULT_ON (1 << 1) #define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a # define SDVO_CONTROL_BUS_PROM (1 << 0) # define SDVO_CONTROL_BUS_DDC1 (1 << 1) # define SDVO_CONTROL_BUS_DDC2 (1 << 2) # define SDVO_CONTROL_BUS_DDC3 (1 << 3) /* HDMI op codes */ #define SDVO_CMD_GET_SUPP_ENCODE 0x9d #define SDVO_CMD_GET_ENCODE 0x9e #define SDVO_CMD_SET_ENCODE 0x9f #define SDVO_ENCODE_DVI 0x0 #define SDVO_ENCODE_HDMI 0x1 #define SDVO_CMD_SET_PIXEL_REPLI 0x8b #define SDVO_CMD_GET_PIXEL_REPLI 0x8c #define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d #define SDVO_CMD_SET_COLORIMETRY 0x8e #define SDVO_COLORIMETRY_RGB256 0x0 #define SDVO_COLORIMETRY_RGB220 0x1 #define SDVO_COLORIMETRY_YCrCb422 0x3 #define SDVO_COLORIMETRY_YCrCb444 0x4 #define SDVO_CMD_GET_COLORIMETRY 0x8f #define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90 #define SDVO_CMD_SET_AUDIO_STAT 0x91 #define SDVO_CMD_GET_AUDIO_STAT 0x92 #define SDVO_CMD_SET_HBUF_INDEX 0x93 #define SDVO_CMD_GET_HBUF_INDEX 0x94 #define SDVO_CMD_GET_HBUF_INFO 0x95 #define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96 #define SDVO_CMD_GET_HBUF_AV_SPLIT 0x97 #define 
SDVO_CMD_SET_HBUF_DATA 0x98 #define SDVO_CMD_GET_HBUF_DATA 0x99 #define SDVO_CMD_SET_HBUF_TXRATE 0x9a #define SDVO_CMD_GET_HBUF_TXRATE 0x9b #define SDVO_HBUF_TX_DISABLED (0 << 6) #define SDVO_HBUF_TX_ONCE (2 << 6) #define SDVO_HBUF_TX_VSYNC (3 << 6) #define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c #define SDVO_NEED_TO_STALL (1 << 7) struct psb_intel_sdvo_encode { u8 dvi_rev; u8 hdmi_rev; } __attribute__ ((packed));
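The psb_intel_sdvo_dtd layout near the top of this header splits each 12-bit timing value into a low byte per field plus a shared high-nibble byte (h_high, v_high). The sketch below shows that packing for the horizontal pair only; the nibble ordering (h_active in bits 7:4) follows common SDVO driver usage rather than anything stated in this header, and the sample timing numbers are arbitrary.

#include <stdint.h>
#include <stdio.h>

struct dtd_part1_h {            /* horizontal half of part1, cut down */
    uint16_t clock;             /* pixel clock, in 10kHz units */
    uint8_t h_active;           /* lower 8 bits (pixels) */
    uint8_t h_blank;            /* lower 8 bits (pixels) */
    uint8_t h_high;             /* upper 4 bits of each value */
};

static void pack_h(struct dtd_part1_h *p, unsigned h_active, unsigned h_blank)
{
    p->h_active = h_active & 0xff;
    p->h_blank = h_blank & 0xff;
    p->h_high = (((h_active >> 8) & 0xf) << 4) | ((h_blank >> 8) & 0xf);
}

int main(void)
{
    struct dtd_part1_h p = { .clock = 14850 };  /* 148.50 MHz in 10kHz units */
    pack_h(&p, 1920, 280);                      /* arbitrary sample timings */
    printf("h_active=0x%02x h_blank=0x%02x h_high=0x%02x\n",
           p.h_active, p.h_blank, p.h_high);
    /* the reader recombines the split fields the same way in reverse */
    unsigned active = (((unsigned)p.h_high >> 4) << 8) | p.h_active;
    printf("recovered h_active=%u\n", active);
    return 0;
}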
null
null
null
null
97,400
32,891
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
197,886
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (C) 2014, Samsung Electronics Co. Ltd. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/iio/common/ssp_sensors.h> #include <linux/iio/iio.h> #include <linux/iio/buffer.h> #include <linux/iio/kfifo_buf.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include "../common/ssp_sensors/ssp_iio_sensor.h" #define SSP_CHANNEL_COUNT 3 #define SSP_GYROSCOPE_NAME "ssp-gyroscope" static const char ssp_gyro_name[] = SSP_GYROSCOPE_NAME; enum ssp_gyro_3d_channel { SSP_CHANNEL_SCAN_INDEX_X, SSP_CHANNEL_SCAN_INDEX_Y, SSP_CHANNEL_SCAN_INDEX_Z, SSP_CHANNEL_SCAN_INDEX_TIME, }; static int ssp_gyro_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { u32 t; struct ssp_data *data = dev_get_drvdata(indio_dev->dev.parent->parent); switch (mask) { case IIO_CHAN_INFO_SAMP_FREQ: t = ssp_get_sensor_delay(data, SSP_GYROSCOPE_SENSOR); ssp_convert_to_freq(t, val, val2); return IIO_VAL_INT_PLUS_MICRO; default: break; } return -EINVAL; } static int ssp_gyro_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { int ret; struct ssp_data *data = dev_get_drvdata(indio_dev->dev.parent->parent); switch (mask) { case IIO_CHAN_INFO_SAMP_FREQ: ret = ssp_convert_to_time(val, val2); ret = ssp_change_delay(data, SSP_GYROSCOPE_SENSOR, ret); if (ret < 0) dev_err(&indio_dev->dev, "gyro sensor enable fail\n"); return ret; default: break; } return -EINVAL; } static const struct iio_info ssp_gyro_iio_info = { .read_raw = &ssp_gyro_read_raw, .write_raw = &ssp_gyro_write_raw, }; static const unsigned long ssp_gyro_scan_mask[] = { 0x07, 0, }; static const struct iio_chan_spec ssp_gyro_channels[] = { SSP_CHANNEL_AG(IIO_ANGL_VEL, IIO_MOD_X, SSP_CHANNEL_SCAN_INDEX_X), SSP_CHANNEL_AG(IIO_ANGL_VEL, IIO_MOD_Y, SSP_CHANNEL_SCAN_INDEX_Y), SSP_CHANNEL_AG(IIO_ANGL_VEL, IIO_MOD_Z, SSP_CHANNEL_SCAN_INDEX_Z), SSP_CHAN_TIMESTAMP(SSP_CHANNEL_SCAN_INDEX_TIME), }; static int ssp_process_gyro_data(struct iio_dev *indio_dev, void *buf, int64_t timestamp) { return ssp_common_process_data(indio_dev, buf, SSP_GYROSCOPE_SIZE, timestamp); } static const struct iio_buffer_setup_ops ssp_gyro_buffer_ops = { .postenable = &ssp_common_buffer_postenable, .postdisable = &ssp_common_buffer_postdisable, }; static int ssp_gyro_probe(struct platform_device *pdev) { int ret; struct iio_dev *indio_dev; struct ssp_sensor_data *spd; struct iio_buffer *buffer; indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*spd)); if (!indio_dev) return -ENOMEM; spd = iio_priv(indio_dev); spd->process_data = ssp_process_gyro_data; spd->type = SSP_GYROSCOPE_SENSOR; indio_dev->name = ssp_gyro_name; indio_dev->dev.parent = &pdev->dev; indio_dev->info = &ssp_gyro_iio_info; indio_dev->modes = INDIO_BUFFER_SOFTWARE; indio_dev->channels = ssp_gyro_channels; indio_dev->num_channels = ARRAY_SIZE(ssp_gyro_channels); indio_dev->available_scan_masks = ssp_gyro_scan_mask; buffer = devm_iio_kfifo_allocate(&pdev->dev); if (!buffer) return -ENOMEM; 
iio_device_attach_buffer(indio_dev, buffer); indio_dev->setup_ops = &ssp_gyro_buffer_ops; platform_set_drvdata(pdev, indio_dev); ret = devm_iio_device_register(&pdev->dev, indio_dev); if (ret < 0) return ret; /* ssp registering should be done after all iio setup */ ssp_register_consumer(indio_dev, SSP_GYROSCOPE_SENSOR); return 0; } static struct platform_driver ssp_gyro_driver = { .driver = { .name = SSP_GYROSCOPE_NAME, }, .probe = ssp_gyro_probe, }; module_platform_driver(ssp_gyro_driver); MODULE_AUTHOR("Karol Wrona <k.wrona@samsung.com>"); MODULE_DESCRIPTION("Samsung sensorhub gyroscopes driver"); MODULE_LICENSE("GPL");
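ssp_gyro_read_raw() above returns the sampling frequency as IIO_VAL_INT_PLUS_MICRO, i.e. whole hertz in *val and a micro-hertz remainder in *val2. The standalone sketch below shows one plausible way to produce that split, assuming the sensorhub stores the period in milliseconds; the real ssp_convert_to_freq() lives in ssp_iio_sensor.h and may differ in detail.

#include <stdio.h>

/* Assumed: delay_ms is the sampling period in milliseconds. */
static void convert_to_freq(unsigned int delay_ms, int *val, int *val2)
{
    if (delay_ms == 0) {
        *val = 0;
        *val2 = 0;
        return;
    }
    *val = 1000 / delay_ms;                         /* whole hertz */
    *val2 = (1000 % delay_ms) * 1000000 / delay_ms; /* micro-hertz remainder */
}

int main(void)
{
    int val, val2;
    convert_to_freq(15, &val, &val2);   /* a 15 ms period */
    printf("%d.%06d Hz\n", val, val2);  /* prints 66.666666 Hz */
    return 0;
}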
null
null
null
null
106,233
8,278
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
173,273
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef _ASM_X86_STAT_H #define _ASM_X86_STAT_H #include <asm/posix_types.h> #define STAT_HAVE_NSEC 1 #ifdef __i386__ struct stat { unsigned long st_dev; unsigned long st_ino; unsigned short st_mode; unsigned short st_nlink; unsigned short st_uid; unsigned short st_gid; unsigned long st_rdev; unsigned long st_size; unsigned long st_blksize; unsigned long st_blocks; unsigned long st_atime; unsigned long st_atime_nsec; unsigned long st_mtime; unsigned long st_mtime_nsec; unsigned long st_ctime; unsigned long st_ctime_nsec; unsigned long __unused4; unsigned long __unused5; }; /* We don't need to memset the whole thing just to initialize the padding */ #define INIT_STRUCT_STAT_PADDING(st) do { \ st.__unused4 = 0; \ st.__unused5 = 0; \ } while (0) #define STAT64_HAS_BROKEN_ST_INO 1 /* This matches struct stat64 in glibc2.1, hence the absolutely * insane amounts of padding around dev_t's. */ struct stat64 { unsigned long long st_dev; unsigned char __pad0[4]; unsigned long __st_ino; unsigned int st_mode; unsigned int st_nlink; unsigned long st_uid; unsigned long st_gid; unsigned long long st_rdev; unsigned char __pad3[4]; long long st_size; unsigned long st_blksize; /* Number 512-byte blocks allocated. */ unsigned long long st_blocks; unsigned long st_atime; unsigned long st_atime_nsec; unsigned long st_mtime; unsigned int st_mtime_nsec; unsigned long st_ctime; unsigned long st_ctime_nsec; unsigned long long st_ino; }; /* We don't need to memset the whole thing just to initialize the padding */ #define INIT_STRUCT_STAT64_PADDING(st) do { \ memset(&st.__pad0, 0, sizeof(st.__pad0)); \ memset(&st.__pad3, 0, sizeof(st.__pad3)); \ } while (0) #else /* __i386__ */ struct stat { __kernel_ulong_t st_dev; __kernel_ulong_t st_ino; __kernel_ulong_t st_nlink; unsigned int st_mode; unsigned int st_uid; unsigned int st_gid; unsigned int __pad0; __kernel_ulong_t st_rdev; __kernel_long_t st_size; __kernel_long_t st_blksize; __kernel_long_t st_blocks; /* Number 512-byte blocks allocated. */ __kernel_ulong_t st_atime; __kernel_ulong_t st_atime_nsec; __kernel_ulong_t st_mtime; __kernel_ulong_t st_mtime_nsec; __kernel_ulong_t st_ctime; __kernel_ulong_t st_ctime_nsec; __kernel_long_t __unused[3]; }; /* We don't need to memset the whole thing just to initialize the padding */ #define INIT_STRUCT_STAT_PADDING(st) do { \ st.__pad0 = 0; \ st.__unused[0] = 0; \ st.__unused[1] = 0; \ st.__unused[2] = 0; \ } while (0) #endif /* for 32bit emulation and 32 bit kernels */ struct __old_kernel_stat { unsigned short st_dev; unsigned short st_ino; unsigned short st_mode; unsigned short st_nlink; unsigned short st_uid; unsigned short st_gid; unsigned short st_rdev; #ifdef __i386__ unsigned long st_size; unsigned long st_atime; unsigned long st_mtime; unsigned long st_ctime; #else unsigned int st_size; unsigned int st_atime; unsigned int st_mtime; unsigned int st_ctime; #endif }; #endif /* _ASM_X86_STAT_H */
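The INIT_STRUCT_STAT_PADDING macros above act on the comment they sit under: every real field of the struct is assigned by the caller anyway, so only the named pad/reserved fields need zeroing to keep kernel stack bytes from leaking to userspace, and a full memset is skipped. Below is a standalone sketch of the same pattern on a cut-down struct, with the field order chosen so LP64 leaves no implicit holes.

#include <stdio.h>

struct mini_stat {
    unsigned long st_dev;
    unsigned long st_size;
    unsigned int st_mode;
    unsigned int __pad0;
    long __unused[3];
};

/* zero only the reserved fields instead of memset()ing the whole struct */
#define INIT_MINI_STAT_PADDING(st) do { \
    (st).__pad0 = 0;                    \
    (st).__unused[0] = 0;               \
    (st).__unused[1] = 0;               \
    (st).__unused[2] = 0;               \
} while (0)

int main(void)
{
    struct mini_stat st;            /* deliberately left uninitialized */
    st.st_dev = 8;                  /* every real field gets a value... */
    st.st_size = 4096;
    st.st_mode = 0100644;
    INIT_MINI_STAT_PADDING(st);     /* ...and only the padding is zeroed */
    printf("dev=%lu size=%lu mode=%o pad=%u\n",
           st.st_dev, st.st_size, st.st_mode, st.__pad0);
    return 0;
}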
null
null
null
null
81,620
51,945
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
51,945
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "media/gpu/v4l2/v4l2_video_decode_accelerator.h" #include <dlfcn.h> #include <errno.h> #include <fcntl.h> #include <linux/videodev2.h> #include <poll.h> #include <string.h> #include <sys/eventfd.h> #include <sys/ioctl.h> #include <sys/mman.h> #include "base/bind.h" #include "base/command_line.h" #include "base/message_loop/message_loop.h" #include "base/numerics/safe_conversions.h" #include "base/posix/eintr_wrapper.h" #include "base/single_thread_task_runner.h" #include "base/threading/thread_task_runner_handle.h" #include "base/trace_event/trace_event.h" #include "build/build_config.h" #include "media/base/media_switches.h" #include "media/gpu/shared_memory_region.h" #include "media/video/h264_parser.h" #include "ui/gfx/geometry/rect.h" #include "ui/gl/gl_context.h" #include "ui/gl/scoped_binders.h" #define DVLOGF(level) DVLOG(level) << __func__ << "(): " #define VLOGF(level) VLOG(level) << __func__ << "(): " #define VPLOGF(level) VPLOG(level) << __func__ << "(): " #define NOTIFY_ERROR(x) \ do { \ VLOGF(1) << "Setting error state:" << x; \ SetErrorState(x); \ } while (0) #define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str) \ do { \ if (device_->Ioctl(type, arg) != 0) { \ VPLOGF(1) << "ioctl() failed: " << type_str; \ NOTIFY_ERROR(PLATFORM_FAILURE); \ return value; \ } \ } while (0) #define IOCTL_OR_ERROR_RETURN(type, arg) \ IOCTL_OR_ERROR_RETURN_VALUE(type, arg, ((void)0), #type) #define IOCTL_OR_ERROR_RETURN_FALSE(type, arg) \ IOCTL_OR_ERROR_RETURN_VALUE(type, arg, false, #type) #define IOCTL_OR_LOG_ERROR(type, arg) \ do { \ if (device_->Ioctl(type, arg) != 0) \ VPLOGF(1) << "ioctl() failed: " << #type; \ } while (0) namespace media { // static const uint32_t V4L2VideoDecodeAccelerator::supported_input_fourccs_[] = { V4L2_PIX_FMT_H264, V4L2_PIX_FMT_VP8, V4L2_PIX_FMT_VP9, }; struct V4L2VideoDecodeAccelerator::BitstreamBufferRef { BitstreamBufferRef( base::WeakPtr<Client>& client, scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner, std::unique_ptr<SharedMemoryRegion> shm, int32_t input_id); ~BitstreamBufferRef(); const base::WeakPtr<Client> client; const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner; const std::unique_ptr<SharedMemoryRegion> shm; size_t bytes_used; const int32_t input_id; }; struct V4L2VideoDecodeAccelerator::EGLSyncKHRRef { EGLSyncKHRRef(EGLDisplay egl_display, EGLSyncKHR egl_sync); ~EGLSyncKHRRef(); EGLDisplay const egl_display; EGLSyncKHR egl_sync; }; V4L2VideoDecodeAccelerator::BitstreamBufferRef::BitstreamBufferRef( base::WeakPtr<Client>& client, scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner, std::unique_ptr<SharedMemoryRegion> shm, int32_t input_id) : client(client), client_task_runner(client_task_runner), shm(std::move(shm)), bytes_used(0), input_id(input_id) {} V4L2VideoDecodeAccelerator::BitstreamBufferRef::~BitstreamBufferRef() { if (input_id >= 0) { client_task_runner->PostTask( FROM_HERE, base::Bind(&Client::NotifyEndOfBitstreamBuffer, client, input_id)); } } V4L2VideoDecodeAccelerator::EGLSyncKHRRef::EGLSyncKHRRef(EGLDisplay egl_display, EGLSyncKHR egl_sync) : egl_display(egl_display), egl_sync(egl_sync) {} V4L2VideoDecodeAccelerator::EGLSyncKHRRef::~EGLSyncKHRRef() { // We don't check for eglDestroySyncKHR failures, because if we get here // with a valid sync object, something went wrong and we are getting // destroyed anyway. 
if (egl_sync != EGL_NO_SYNC_KHR) eglDestroySyncKHR(egl_display, egl_sync); } V4L2VideoDecodeAccelerator::InputRecord::InputRecord() : at_device(false), address(NULL), length(0), bytes_used(0), input_id(-1) {} V4L2VideoDecodeAccelerator::InputRecord::~InputRecord() {} V4L2VideoDecodeAccelerator::OutputRecord::OutputRecord() : state(kFree), egl_image(EGL_NO_IMAGE_KHR), egl_sync(EGL_NO_SYNC_KHR), picture_id(-1), texture_id(0), cleared(false) {} V4L2VideoDecodeAccelerator::OutputRecord::OutputRecord(OutputRecord&&) = default; V4L2VideoDecodeAccelerator::OutputRecord::~OutputRecord() {} V4L2VideoDecodeAccelerator::PictureRecord::PictureRecord(bool cleared, const Picture& picture) : cleared(cleared), picture(picture) {} V4L2VideoDecodeAccelerator::PictureRecord::~PictureRecord() {} V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator( EGLDisplay egl_display, const GetGLContextCallback& get_gl_context_cb, const MakeGLContextCurrentCallback& make_context_current_cb, const scoped_refptr<V4L2Device>& device) : child_task_runner_(base::ThreadTaskRunnerHandle::Get()), decoder_thread_("V4L2DecoderThread"), decoder_state_(kUninitialized), output_mode_(Config::OutputMode::ALLOCATE), device_(device), decoder_delay_bitstream_buffer_id_(-1), decoder_current_input_buffer_(-1), decoder_decode_buffer_tasks_scheduled_(0), decoder_frames_at_client_(0), decoder_flushing_(false), decoder_cmd_supported_(false), flush_awaiting_last_output_buffer_(false), reset_pending_(false), decoder_partial_frame_pending_(false), input_streamon_(false), input_buffer_queued_count_(0), output_streamon_(false), output_buffer_queued_count_(0), output_dpb_size_(0), output_planes_count_(0), picture_clearing_count_(0), device_poll_thread_("V4L2DevicePollThread"), egl_display_(egl_display), get_gl_context_cb_(get_gl_context_cb), make_context_current_cb_(make_context_current_cb), video_profile_(VIDEO_CODEC_PROFILE_UNKNOWN), input_format_fourcc_(0), output_format_fourcc_(0), egl_image_format_fourcc_(0), egl_image_planes_count_(0), weak_this_factory_(this) { weak_this_ = weak_this_factory_.GetWeakPtr(); } V4L2VideoDecodeAccelerator::~V4L2VideoDecodeAccelerator() { DCHECK(!decoder_thread_.IsRunning()); DCHECK(!device_poll_thread_.IsRunning()); DVLOGF(2); // These maps have members that should be manually destroyed, e.g. file // descriptors, mmap() segments, etc. DCHECK(input_buffer_map_.empty()); DCHECK(output_buffer_map_.empty()); } bool V4L2VideoDecodeAccelerator::Initialize(const Config& config, Client* client) { VLOGF(2) << "profile: " << config.profile << ", output_mode=" << static_cast<int>(config.output_mode); DCHECK(child_task_runner_->BelongsToCurrentThread()); DCHECK_EQ(decoder_state_, kUninitialized); if (config.is_encrypted()) { NOTREACHED() << "Encrypted streams are not supported for this VDA"; return false; } if (config.output_mode != Config::OutputMode::ALLOCATE && config.output_mode != Config::OutputMode::IMPORT) { NOTREACHED() << "Only ALLOCATE and IMPORT OutputModes are supported"; return false; } client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client)); client_ = client_ptr_factory_->GetWeakPtr(); // If we haven't been set up to decode on separate thread via // TryToSetupDecodeOnSeparateThread(), use the main thread/client for // decode tasks. if (!decode_task_runner_) { decode_task_runner_ = child_task_runner_; DCHECK(!decode_client_); decode_client_ = client_; } video_profile_ = config.profile; // We need the context to be initialized to query extensions. 
if (!make_context_current_cb_.is_null()) { if (egl_display_ == EGL_NO_DISPLAY) { VLOGF(1) << "could not get EGLDisplay"; return false; } if (!make_context_current_cb_.Run()) { VLOGF(1) << "could not make context current"; return false; } // TODO(posciak): https://crbug.com/450898. #if defined(ARCH_CPU_ARMEL) if (!gl::g_driver_egl.ext.b_EGL_KHR_fence_sync) { VLOGF(1) << "context does not have EGL_KHR_fence_sync"; return false; } #endif } else { DVLOGF(2) << "No GL callbacks provided, initializing without GL support"; } input_format_fourcc_ = V4L2Device::VideoCodecProfileToV4L2PixFmt(video_profile_, false); if (!device_->Open(V4L2Device::Type::kDecoder, input_format_fourcc_)) { VLOGF(1) << "Failed to open device for profile: " << config.profile << " fourcc: " << std::hex << "0x" << input_format_fourcc_; return false; } // Capabilities check. struct v4l2_capability caps; const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING; IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps); if ((caps.capabilities & kCapsRequired) != kCapsRequired) { VLOGF(1) << "ioctl() failed: VIDIOC_QUERYCAP" << ", caps check failed: 0x" << std::hex << caps.capabilities; return false; } if (!SetupFormats()) return false; if (video_profile_ >= H264PROFILE_MIN && video_profile_ <= H264PROFILE_MAX) { decoder_h264_parser_.reset(new H264Parser()); } if (!decoder_thread_.Start()) { VLOGF(1) << "decoder thread failed to start"; return false; } decoder_state_ = kInitialized; output_mode_ = config.output_mode; // InitializeTask will NOTIFY_ERROR on failure. decoder_thread_.task_runner()->PostTask( FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::InitializeTask, base::Unretained(this))); return true; } void V4L2VideoDecodeAccelerator::InitializeTask() { VLOGF(2); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK_EQ(decoder_state_, kInitialized); // Subscribe to the resolution change event. struct v4l2_event_subscription sub; memset(&sub, 0, sizeof(sub)); sub.type = V4L2_EVENT_SOURCE_CHANGE; IOCTL_OR_ERROR_RETURN(VIDIOC_SUBSCRIBE_EVENT, &sub); if (!CreateInputBuffers()) { NOTIFY_ERROR(PLATFORM_FAILURE); return; } decoder_cmd_supported_ = IsDecoderCmdSupported(); if (!StartDevicePoll()) return; } void V4L2VideoDecodeAccelerator::Decode( const BitstreamBuffer& bitstream_buffer) { DVLOGF(4) << "input_id=" << bitstream_buffer.id() << ", size=" << bitstream_buffer.size(); DCHECK(decode_task_runner_->BelongsToCurrentThread()); if (bitstream_buffer.id() < 0) { VLOGF(1) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id(); if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle())) base::SharedMemory::CloseHandle(bitstream_buffer.handle()); NOTIFY_ERROR(INVALID_ARGUMENT); return; } // DecodeTask() will take care of running a DecodeBufferTask(). 
decoder_thread_.task_runner()->PostTask( FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DecodeTask, base::Unretained(this), bitstream_buffer)); } void V4L2VideoDecodeAccelerator::AssignPictureBuffers( const std::vector<PictureBuffer>& buffers) { VLOGF(2) << "buffer_count=" << buffers.size(); DCHECK(child_task_runner_->BelongsToCurrentThread()); decoder_thread_.task_runner()->PostTask( FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::AssignPictureBuffersTask, base::Unretained(this), buffers)); } void V4L2VideoDecodeAccelerator::AssignPictureBuffersTask( const std::vector<PictureBuffer>& buffers) { VLOGF(2); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK_EQ(decoder_state_, kAwaitingPictureBuffers); uint32_t req_buffer_count = output_dpb_size_ + kDpbOutputBufferExtraCount; if (image_processor_device_) req_buffer_count += kDpbOutputBufferExtraCountForImageProcessor; if (buffers.size() < req_buffer_count) { VLOGF(1) << "Failed to provide requested picture buffers. (Got " << buffers.size() << ", requested " << req_buffer_count << ")"; NOTIFY_ERROR(INVALID_ARGUMENT); return; } // Allocate the output buffers. struct v4l2_requestbuffers reqbufs; memset(&reqbufs, 0, sizeof(reqbufs)); reqbufs.count = buffers.size(); reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; reqbufs.memory = V4L2_MEMORY_MMAP; IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs); if (reqbufs.count != buffers.size()) { VLOGF(1) << "Could not allocate enough output buffers"; NOTIFY_ERROR(PLATFORM_FAILURE); return; } DCHECK(free_output_buffers_.empty()); DCHECK(output_buffer_map_.empty()); output_buffer_map_.resize(buffers.size()); if (image_processor_device_ && output_mode_ == Config::OutputMode::ALLOCATE) { if (!CreateImageProcessor()) return; } for (size_t i = 0; i < output_buffer_map_.size(); ++i) { DCHECK(buffers[i].size() == egl_image_size_); OutputRecord& output_record = output_buffer_map_[i]; DCHECK_EQ(output_record.state, kFree); DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR); DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); DCHECK_EQ(output_record.picture_id, -1); DCHECK_EQ(output_record.cleared, false); DCHECK(output_record.processor_input_fds.empty()); output_record.picture_id = buffers[i].id(); output_record.texture_id = buffers[i].service_texture_ids().empty() ? 0 : buffers[i].service_texture_ids()[0]; // This will remain kAtClient until ImportBufferForPicture is called, either // by the client, or by ourselves, if we are allocating. output_record.state = kAtClient; if (image_processor_device_) { std::vector<base::ScopedFD> dmabuf_fds = device_->GetDmabufsForV4L2Buffer( i, output_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); if (dmabuf_fds.empty()) { VLOGF(1) << "Failed to get DMABUFs of decoder."; NOTIFY_ERROR(PLATFORM_FAILURE); return; } output_record.processor_input_fds = std::move(dmabuf_fds); } if (output_mode_ == Config::OutputMode::ALLOCATE) { std::vector<base::ScopedFD> dmabuf_fds; dmabuf_fds = egl_image_device_->GetDmabufsForV4L2Buffer( i, egl_image_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); if (dmabuf_fds.empty()) { VLOGF(1) << "Failed to get DMABUFs for EGLImage."; NOTIFY_ERROR(PLATFORM_FAILURE); return; } int plane_horiz_bits_per_pixel = VideoFrame::PlaneHorizontalBitsPerPixel( V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_), 0); ImportBufferForPictureTask( output_record.picture_id, std::move(dmabuf_fds), egl_image_size_.width() * plane_horiz_bits_per_pixel / 8); } // else we'll get triggered via ImportBufferForPicture() from client. 
DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id; } if (output_mode_ == Config::OutputMode::ALLOCATE) { DCHECK_EQ(kAwaitingPictureBuffers, decoder_state_); DVLOGF(3) << "Change state to kDecoding"; decoder_state_ = kDecoding; if (reset_pending_) { FinishReset(); return; } ScheduleDecodeBufferTaskIfNeeded(); } } void V4L2VideoDecodeAccelerator::CreateEGLImageFor( size_t buffer_index, int32_t picture_buffer_id, std::vector<base::ScopedFD> dmabuf_fds, GLuint texture_id, const gfx::Size& size, uint32_t fourcc) { DVLOGF(3) << "index=" << buffer_index; DCHECK(child_task_runner_->BelongsToCurrentThread()); DCHECK_NE(texture_id, 0u); if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) { VLOGF(1) << "GL callbacks required for binding to EGLImages"; NOTIFY_ERROR(INVALID_ARGUMENT); return; } gl::GLContext* gl_context = get_gl_context_cb_.Run(); if (!gl_context || !make_context_current_cb_.Run()) { VLOGF(1) << "No GL context"; NOTIFY_ERROR(PLATFORM_FAILURE); return; } gl::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0); EGLImageKHR egl_image = egl_image_device_->CreateEGLImage( egl_display_, gl_context->GetHandle(), texture_id, size, buffer_index, fourcc, dmabuf_fds); if (egl_image == EGL_NO_IMAGE_KHR) { VLOGF(1) << "could not create EGLImageKHR," << " index=" << buffer_index << " texture_id=" << texture_id; NOTIFY_ERROR(PLATFORM_FAILURE); return; } decoder_thread_.task_runner()->PostTask( FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::AssignEGLImage, base::Unretained(this), buffer_index, picture_buffer_id, egl_image, base::Passed(&dmabuf_fds))); } void V4L2VideoDecodeAccelerator::AssignEGLImage( size_t buffer_index, int32_t picture_buffer_id, EGLImageKHR egl_image, std::vector<base::ScopedFD> dmabuf_fds) { DVLOGF(3) << "index=" << buffer_index << ", picture_id=" << picture_buffer_id; DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); // It's possible that while waiting for the EGLImages to be allocated and // assigned, we have already decoded more of the stream and saw another // resolution change. This is a normal situation, in such a case either there // is no output record with this index awaiting an EGLImage to be assigned to // it, or the record is already updated to use a newer PictureBuffer and is // awaiting an EGLImage associated with a different picture_buffer_id. If so, // just discard this image, we will get the one we are waiting for later. 
if (buffer_index >= output_buffer_map_.size() || output_buffer_map_[buffer_index].picture_id != picture_buffer_id) { DVLOGF(4) << "Picture set already changed, dropping EGLImage"; child_task_runner_->PostTask( FROM_HERE, base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), device_, egl_display_, egl_image)); return; } OutputRecord& output_record = output_buffer_map_[buffer_index]; DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR); DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); DCHECK_EQ(output_record.state, kFree); DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(), buffer_index), 0); output_record.egl_image = egl_image; free_output_buffers_.push_back(buffer_index); if (decoder_state_ != kChangingResolution) { Enqueue(); ScheduleDecodeBufferTaskIfNeeded(); } } void V4L2VideoDecodeAccelerator::ImportBufferForPicture( int32_t picture_buffer_id, VideoPixelFormat pixel_format, const gfx::GpuMemoryBufferHandle& gpu_memory_buffer_handle) { DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id; DCHECK(child_task_runner_->BelongsToCurrentThread()); if (output_mode_ != Config::OutputMode::IMPORT) { VLOGF(1) << "Cannot import in non-import mode"; NOTIFY_ERROR(INVALID_ARGUMENT); return; } // |output_format_fourcc_| is the output format of the decoder. It is not // the final output format from the image processor (if exists). // Use |egl_image_format_fourcc_|, it will be the final output format. if (pixel_format != V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_)) { VLOGF(1) << "Unsupported import format: " << pixel_format; NOTIFY_ERROR(INVALID_ARGUMENT); return; } std::vector<base::ScopedFD> dmabuf_fds; int32_t stride = 0; #if defined(USE_OZONE) for (const auto& fd : gpu_memory_buffer_handle.native_pixmap_handle.fds) { DCHECK_NE(fd.fd, -1); dmabuf_fds.push_back(base::ScopedFD(fd.fd)); } stride = gpu_memory_buffer_handle.native_pixmap_handle.planes[0].stride; for (const auto& plane : gpu_memory_buffer_handle.native_pixmap_handle.planes) { DVLOGF(3) << ": offset=" << plane.offset << ", stride=" << plane.stride; } #endif decoder_thread_.task_runner()->PostTask( FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ImportBufferForPictureTask, base::Unretained(this), picture_buffer_id, base::Passed(&dmabuf_fds), stride)); } void V4L2VideoDecodeAccelerator::ImportBufferForPictureTask( int32_t picture_buffer_id, std::vector<base::ScopedFD> dmabuf_fds, int32_t stride) { DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id << ", dmabuf_fds.size()=" << dmabuf_fds.size() << ", stride=" << stride; DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); const auto iter = std::find_if(output_buffer_map_.begin(), output_buffer_map_.end(), [picture_buffer_id](const OutputRecord& output_record) { return output_record.picture_id == picture_buffer_id; }); if (iter == output_buffer_map_.end()) { // It's possible that we've already posted a DismissPictureBuffer for this // picture, but it has not yet executed when this ImportBufferForPicture was // posted to us by the client. In that case just ignore this (we've already // dismissed it and accounted for that). 
DVLOGF(3) << "got picture id=" << picture_buffer_id << " not in use (anymore?)."; return; } if (iter->state != kAtClient) { VLOGF(1) << "Cannot import buffer not owned by client"; NOTIFY_ERROR(INVALID_ARGUMENT); return; } int plane_horiz_bits_per_pixel = VideoFrame::PlaneHorizontalBitsPerPixel( V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_), 0); if (plane_horiz_bits_per_pixel == 0 || (stride * 8) % plane_horiz_bits_per_pixel != 0) { VLOGF(1) << "Invalid format " << egl_image_format_fourcc_ << " or stride " << stride; NOTIFY_ERROR(INVALID_ARGUMENT); return; } int adjusted_coded_width = stride * 8 / plane_horiz_bits_per_pixel; if (image_processor_device_ && !image_processor_) { // This is the first buffer import. Create the image processor and change // the decoder state. The client may adjust the coded width. We don't have // the final coded size in AssignPictureBuffers yet. Use the adjusted coded // width to create the image processor. VLOGF(2) << "Original egl_image_size=" << egl_image_size_.ToString() << ", adjusted coded width=" << adjusted_coded_width; DCHECK_GE(adjusted_coded_width, egl_image_size_.width()); egl_image_size_.set_width(adjusted_coded_width); if (!CreateImageProcessor()) return; DCHECK_EQ(kAwaitingPictureBuffers, decoder_state_); DVLOGF(3) << "Change state to kDecoding"; decoder_state_ = kDecoding; if (reset_pending_) { FinishReset(); } } else { DCHECK_EQ(egl_image_size_.width(), adjusted_coded_width); } size_t index = iter - output_buffer_map_.begin(); DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(), index), 0); iter->state = kFree; if (iter->texture_id != 0) { if (iter->egl_image != EGL_NO_IMAGE_KHR) { child_task_runner_->PostTask( FROM_HERE, base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), device_, egl_display_, iter->egl_image)); } child_task_runner_->PostTask( FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::CreateEGLImageFor, weak_this_, index, picture_buffer_id, base::Passed(&dmabuf_fds), iter->texture_id, egl_image_size_, egl_image_format_fourcc_)); } else { // No need for an EGLImage, start using this buffer now. DCHECK_EQ(egl_image_planes_count_, dmabuf_fds.size()); iter->processor_output_fds.swap(dmabuf_fds); free_output_buffers_.push_back(index); if (decoder_state_ != kChangingResolution) { Enqueue(); ScheduleDecodeBufferTaskIfNeeded(); } } } void V4L2VideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) { DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id; // Must be run on child thread, as we'll insert a sync in the EGL context. DCHECK(child_task_runner_->BelongsToCurrentThread()); std::unique_ptr<EGLSyncKHRRef> egl_sync_ref; if (!make_context_current_cb_.is_null()) { if (!make_context_current_cb_.Run()) { VLOGF(1) << "could not make context current"; NOTIFY_ERROR(PLATFORM_FAILURE); return; } EGLSyncKHR egl_sync = EGL_NO_SYNC_KHR; // TODO(posciak): https://crbug.com/450898. 
#if defined(ARCH_CPU_ARMEL) egl_sync = eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL); if (egl_sync == EGL_NO_SYNC_KHR) { VLOGF(1) << "eglCreateSyncKHR() failed"; NOTIFY_ERROR(PLATFORM_FAILURE); return; } #endif egl_sync_ref.reset(new EGLSyncKHRRef(egl_display_, egl_sync)); } decoder_thread_.task_runner()->PostTask( FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ReusePictureBufferTask, base::Unretained(this), picture_buffer_id, base::Passed(&egl_sync_ref))); } void V4L2VideoDecodeAccelerator::Flush() { VLOGF(2); DCHECK(child_task_runner_->BelongsToCurrentThread()); decoder_thread_.task_runner()->PostTask( FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::FlushTask, base::Unretained(this))); } void V4L2VideoDecodeAccelerator::Reset() { VLOGF(2); DCHECK(child_task_runner_->BelongsToCurrentThread()); decoder_thread_.task_runner()->PostTask( FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ResetTask, base::Unretained(this))); } void V4L2VideoDecodeAccelerator::Destroy() { VLOGF(2); DCHECK(child_task_runner_->BelongsToCurrentThread()); // We're destroying; cancel all callbacks. client_ptr_factory_.reset(); weak_this_factory_.InvalidateWeakPtrs(); // If the decoder thread is running, destroy using posted task. if (decoder_thread_.IsRunning()) { decoder_thread_.task_runner()->PostTask( FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DestroyTask, base::Unretained(this))); // DestroyTask() will cause the decoder_thread_ to flush all tasks. decoder_thread_.Stop(); } else { // Otherwise, call the destroy task directly. DestroyTask(); } delete this; VLOGF(2) << "Destroyed."; } bool V4L2VideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread( const base::WeakPtr<Client>& decode_client, const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) { VLOGF(2); decode_client_ = decode_client; decode_task_runner_ = decode_task_runner; return true; } // static VideoDecodeAccelerator::SupportedProfiles V4L2VideoDecodeAccelerator::GetSupportedProfiles() { scoped_refptr<V4L2Device> device = V4L2Device::Create(); if (!device) return SupportedProfiles(); return device->GetSupportedDecodeProfiles(arraysize(supported_input_fourccs_), supported_input_fourccs_); } void V4L2VideoDecodeAccelerator::DecodeTask( const BitstreamBuffer& bitstream_buffer) { DVLOGF(4) << "input_id=" << bitstream_buffer.id(); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK_NE(decoder_state_, kUninitialized); TRACE_EVENT1("media,gpu", "V4L2VDA::DecodeTask", "input_id", bitstream_buffer.id()); std::unique_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef( decode_client_, decode_task_runner_, std::unique_ptr<SharedMemoryRegion>( new SharedMemoryRegion(bitstream_buffer, true)), bitstream_buffer.id())); // Skip empty buffer. if (bitstream_buffer.size() == 0) return; if (!bitstream_record->shm->Map()) { VLOGF(1) << "could not map bitstream_buffer"; NOTIFY_ERROR(UNREADABLE_INPUT); return; } DVLOGF(4) << "mapped at=" << bitstream_record->shm->memory(); if (decoder_state_ == kResetting || decoder_flushing_) { // In the case that we're resetting or flushing, we need to delay decoding // the BitstreamBuffers that come after the Reset() or Flush() call. When // we're here, we know that this DecodeTask() was scheduled by a Decode() // call that came after (in the client thread) the Reset() or Flush() call; // thus set up the delay if necessary. 
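    // Worked example: the client calls Decode(A), Reset(), Decode(B),
    // Decode(C). A is discarded by ResetTask(), B becomes the delay marker
    // here, and B and C sit in |decoder_input_queue_| until ResetDoneTask()
    // clears |decoder_delay_bitstream_buffer_id_|.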
if (decoder_delay_bitstream_buffer_id_ == -1) decoder_delay_bitstream_buffer_id_ = bitstream_record->input_id; } else if (decoder_state_ == kError) { VLOGF(2) << "early out: kError state"; return; } decoder_input_queue_.push(std::move(bitstream_record)); decoder_decode_buffer_tasks_scheduled_++; DecodeBufferTask(); } void V4L2VideoDecodeAccelerator::DecodeBufferTask() { DVLOGF(4); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK_NE(decoder_state_, kUninitialized); TRACE_EVENT0("media,gpu", "V4L2VDA::DecodeBufferTask"); decoder_decode_buffer_tasks_scheduled_--; if (decoder_state_ != kInitialized && decoder_state_ != kDecoding) { DVLOGF(3) << "early out: state=" << decoder_state_; return; } if (decoder_current_bitstream_buffer_ == NULL) { if (decoder_input_queue_.empty()) { // We're waiting for a new buffer -- exit without scheduling a new task. return; } if (decoder_delay_bitstream_buffer_id_ == decoder_input_queue_.front()->input_id) { // We're asked to delay decoding on this and subsequent buffers. return; } // Setup to use the next buffer. decoder_current_bitstream_buffer_ = std::move(decoder_input_queue_.front()); decoder_input_queue_.pop(); const auto& shm = decoder_current_bitstream_buffer_->shm; if (shm) { DVLOGF(4) << "reading input_id=" << decoder_current_bitstream_buffer_->input_id << ", addr=" << shm->memory() << ", size=" << shm->size(); } else { DCHECK_EQ(decoder_current_bitstream_buffer_->input_id, kFlushBufferId); DVLOGF(4) << "reading input_id=kFlushBufferId"; } } bool schedule_task = false; size_t decoded_size = 0; const auto& shm = decoder_current_bitstream_buffer_->shm; if (!shm) { // This is a dummy buffer, queued to flush the pipe. Flush. DCHECK_EQ(decoder_current_bitstream_buffer_->input_id, kFlushBufferId); // Enqueue a buffer guaranteed to be empty. To do that, we flush the // current input, enqueue no data to the next frame, then flush that down. schedule_task = true; if (decoder_current_input_buffer_ != -1 && input_buffer_map_[decoder_current_input_buffer_].input_id != kFlushBufferId) schedule_task = FlushInputFrame(); if (schedule_task && AppendToInputFrame(NULL, 0) && FlushInputFrame()) { VLOGF(2) << "enqueued flush buffer"; decoder_partial_frame_pending_ = false; schedule_task = true; } else { // If we failed to enqueue the empty buffer (due to pipeline // backpressure), don't advance the bitstream buffer queue, and don't // schedule the next task. This bitstream buffer queue entry will get // reprocessed when the pipeline frees up. schedule_task = false; } } else if (shm->size() == 0) { // This is a buffer queued from the client that has zero size. Skip. schedule_task = true; } else { // This is a buffer queued from the client, with actual contents. Decode. const uint8_t* const data = reinterpret_cast<const uint8_t*>(shm->memory()) + decoder_current_bitstream_buffer_->bytes_used; const size_t data_size = shm->size() - decoder_current_bitstream_buffer_->bytes_used; if (!AdvanceFrameFragment(data, data_size, &decoded_size)) { NOTIFY_ERROR(UNREADABLE_INPUT); return; } // AdvanceFrameFragment should not return a size larger than the buffer // size, even on invalid data. CHECK_LE(decoded_size, data_size); switch (decoder_state_) { case kInitialized: schedule_task = DecodeBufferInitial(data, decoded_size, &decoded_size); break; case kDecoding: schedule_task = DecodeBufferContinue(data, decoded_size); break; default: NOTIFY_ERROR(ILLEGAL_STATE); return; } } if (decoder_state_ == kError) { // Failed during decode. 
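    // (Whatever failed has already gone through NOTIFY_ERROR(), which moved
    // us to kError and informed the client; nothing more to do here.)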
return; } if (schedule_task) { decoder_current_bitstream_buffer_->bytes_used += decoded_size; if ((shm ? shm->size() : 0) == decoder_current_bitstream_buffer_->bytes_used) { // Our current bitstream buffer is done; return it. int32_t input_id = decoder_current_bitstream_buffer_->input_id; DVLOGF(4) << "finished input_id=" << input_id; // BitstreamBufferRef destructor calls NotifyEndOfBitstreamBuffer(). decoder_current_bitstream_buffer_.reset(); } ScheduleDecodeBufferTaskIfNeeded(); } } bool V4L2VideoDecodeAccelerator::AdvanceFrameFragment(const uint8_t* data, size_t size, size_t* endpos) { if (video_profile_ >= H264PROFILE_MIN && video_profile_ <= H264PROFILE_MAX) { // For H264, we need to feed HW one frame at a time. This is going to take // some parsing of our input stream. decoder_h264_parser_->SetStream(data, size); H264NALU nalu; H264Parser::Result result; *endpos = 0; // Keep on peeking the next NALs while they don't indicate a frame // boundary. for (;;) { bool end_of_frame = false; result = decoder_h264_parser_->AdvanceToNextNALU(&nalu); if (result == H264Parser::kInvalidStream || result == H264Parser::kUnsupportedStream) return false; if (result == H264Parser::kEOStream) { // We've reached the end of the buffer before finding a frame boundary. decoder_partial_frame_pending_ = true; *endpos = size; return true; } switch (nalu.nal_unit_type) { case H264NALU::kNonIDRSlice: case H264NALU::kIDRSlice: if (nalu.size < 1) return false; // For these two, if the "first_mb_in_slice" field is zero, start a // new frame and return. This field is Exp-Golomb coded starting on // the eighth data bit of the NAL; a zero value is encoded with a // leading '1' bit in the byte, which we can detect as the byte being // (unsigned) greater than or equal to 0x80. if (nalu.data[1] >= 0x80) { end_of_frame = true; break; } break; case H264NALU::kSEIMessage: case H264NALU::kSPS: case H264NALU::kPPS: case H264NALU::kAUD: case H264NALU::kEOSeq: case H264NALU::kEOStream: case H264NALU::kReserved14: case H264NALU::kReserved15: case H264NALU::kReserved16: case H264NALU::kReserved17: case H264NALU::kReserved18: // These unconditionally signal a frame boundary. end_of_frame = true; break; default: // For all others, keep going. break; } if (end_of_frame) { if (!decoder_partial_frame_pending_ && *endpos == 0) { // The frame was previously restarted, and we haven't filled the // current frame with any contents yet. Start the new frame here and // continue parsing NALs. } else { // The frame wasn't previously restarted and/or we have contents for // the current frame; signal the start of a new frame here: we don't // have a partial frame anymore. decoder_partial_frame_pending_ = false; return true; } } *endpos = (nalu.data + nalu.size) - data; } NOTREACHED(); return false; } else { DCHECK_GE(video_profile_, VP8PROFILE_MIN); DCHECK_LE(video_profile_, VP9PROFILE_MAX); // For VP8/9, we can just dump the entire buffer. No fragmentation needed, // and we never return a partial frame. *endpos = size; decoder_partial_frame_pending_ = false; return true; } } void V4L2VideoDecodeAccelerator::ScheduleDecodeBufferTaskIfNeeded() { DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); // If we're behind on tasks, schedule another one. 
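  // Example: with two buffers in |decoder_input_queue_| plus a current one,
  // but only a single DecodeBufferTask() in flight, this posts another task
  // so input keeps draining even after a task early-outed for lack of free
  // input buffers.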
int buffers_to_decode = decoder_input_queue_.size(); if (decoder_current_bitstream_buffer_ != NULL) buffers_to_decode++; if (decoder_decode_buffer_tasks_scheduled_ < buffers_to_decode) { decoder_decode_buffer_tasks_scheduled_++; decoder_thread_.task_runner()->PostTask( FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DecodeBufferTask, base::Unretained(this))); } } bool V4L2VideoDecodeAccelerator::DecodeBufferInitial(const void* data, size_t size, size_t* endpos) { DVLOGF(3) << "data=" << data << ", size=" << size; DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK_EQ(decoder_state_, kInitialized); // Initial decode. We haven't been able to get output stream format info yet. // Get it, and start decoding. // Copy in and send to HW. if (!AppendToInputFrame(data, size)) return false; // If we only have a partial frame, don't flush and process yet. if (decoder_partial_frame_pending_) return true; if (!FlushInputFrame()) return false; // Recycle buffers. Dequeue(); *endpos = size; // If an initial resolution change event is not done yet, a driver probably // needs more stream to decode format. // Return true and schedule next buffer without changing status to kDecoding. // If the initial resolution change is done and coded size is known, we may // still have to wait for AssignPictureBuffers() and output buffers to be // allocated. if (coded_size_.IsEmpty() || output_buffer_map_.empty()) { return true; } decoder_state_ = kDecoding; ScheduleDecodeBufferTaskIfNeeded(); return true; } bool V4L2VideoDecodeAccelerator::DecodeBufferContinue(const void* data, size_t size) { DVLOGF(4) << "data=" << data << ", size=" << size; DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK_EQ(decoder_state_, kDecoding); // Both of these calls will set kError state if they fail. // Only flush the frame if it's complete. return (AppendToInputFrame(data, size) && (decoder_partial_frame_pending_ || FlushInputFrame())); } bool V4L2VideoDecodeAccelerator::AppendToInputFrame(const void* data, size_t size) { DVLOGF(4); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK_NE(decoder_state_, kUninitialized); DCHECK_NE(decoder_state_, kResetting); DCHECK_NE(decoder_state_, kError); // This routine can handle data == NULL and size == 0, which occurs when // we queue an empty buffer for the purposes of flushing the pipe. // Flush if we're too big if (decoder_current_input_buffer_ != -1) { InputRecord& input_record = input_buffer_map_[decoder_current_input_buffer_]; if (input_record.bytes_used + size > input_record.length) { if (!FlushInputFrame()) return false; decoder_current_input_buffer_ = -1; } } // Try to get an available input buffer if (decoder_current_input_buffer_ == -1) { if (free_input_buffers_.empty()) { // See if we can get more free buffers from HW Dequeue(); if (free_input_buffers_.empty()) { // Nope! DVLOGF(4) << "stalled for input buffers"; return false; } } decoder_current_input_buffer_ = free_input_buffers_.back(); free_input_buffers_.pop_back(); InputRecord& input_record = input_buffer_map_[decoder_current_input_buffer_]; DCHECK_EQ(input_record.bytes_used, 0); DCHECK_EQ(input_record.input_id, -1); DCHECK(decoder_current_bitstream_buffer_ != NULL); input_record.input_id = decoder_current_bitstream_buffer_->input_id; } DCHECK(data != NULL || size == 0); if (size == 0) { // If we asked for an empty buffer, return now. We return only after // getting the next input buffer, since we might actually want an empty // input buffer for flushing purposes. 
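    // (FlushInputFrame() will still queue that buffer with bytesused == 0;
    // an empty V4L2 buffer is how the flush marker travels to the driver.)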
return true; } // Copy in to the buffer. InputRecord& input_record = input_buffer_map_[decoder_current_input_buffer_]; if (size > input_record.length - input_record.bytes_used) { VLOGF(1) << "over-size frame, erroring"; NOTIFY_ERROR(UNREADABLE_INPUT); return false; } memcpy(reinterpret_cast<uint8_t*>(input_record.address) + input_record.bytes_used, data, size); input_record.bytes_used += size; return true; } bool V4L2VideoDecodeAccelerator::FlushInputFrame() { DVLOGF(4); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK_NE(decoder_state_, kUninitialized); DCHECK_NE(decoder_state_, kResetting); DCHECK_NE(decoder_state_, kError); if (decoder_current_input_buffer_ == -1) return true; InputRecord& input_record = input_buffer_map_[decoder_current_input_buffer_]; DCHECK_NE(input_record.input_id, -1); DCHECK(input_record.input_id != kFlushBufferId || input_record.bytes_used == 0); // * if input_id >= 0, this input buffer was prompted by a bitstream buffer we // got from the client. We can skip it if it is empty. // * if input_id < 0 (should be kFlushBufferId in this case), this input // buffer was prompted by a flush buffer, and should be queued even when // empty. if (input_record.input_id >= 0 && input_record.bytes_used == 0) { input_record.input_id = -1; free_input_buffers_.push_back(decoder_current_input_buffer_); decoder_current_input_buffer_ = -1; return true; } // Queue it. input_ready_queue_.push(decoder_current_input_buffer_); decoder_current_input_buffer_ = -1; DVLOGF(4) << "submitting input_id=" << input_record.input_id; // Enqueue once since there's new available input for it. Enqueue(); return (decoder_state_ != kError); } void V4L2VideoDecodeAccelerator::ServiceDeviceTask(bool event_pending) { DVLOGF(4); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK_NE(decoder_state_, kUninitialized); TRACE_EVENT0("media,gpu", "V4L2VDA::ServiceDeviceTask"); if (decoder_state_ == kResetting) { DVLOGF(3) << "early out: kResetting state"; return; } else if (decoder_state_ == kError) { DVLOGF(3) << "early out: kError state"; return; } else if (decoder_state_ == kChangingResolution) { DVLOGF(3) << "early out: kChangingResolution state"; return; } bool resolution_change_pending = false; if (event_pending) resolution_change_pending = DequeueResolutionChangeEvent(); if (!resolution_change_pending && coded_size_.IsEmpty()) { // Some platforms do not send an initial resolution change event. // To work around this, we need to keep checking if the initial resolution // is known already by explicitly querying the format after each decode, // regardless of whether we received an event. // This needs to be done on initial resolution change, // i.e. when coded_size_.IsEmpty(). // Try GetFormatInfo to check if an initial resolution change can be done. struct v4l2_format format; gfx::Size visible_size; bool again; if (GetFormatInfo(&format, &visible_size, &again) && !again) { resolution_change_pending = true; DequeueResolutionChangeEvent(); } } Dequeue(); Enqueue(); // Clear the interrupt fd. if (!device_->ClearDevicePollInterrupt()) { NOTIFY_ERROR(PLATFORM_FAILURE); return; } bool poll_device = false; // Add fd, if we should poll on it. // Can be polled as soon as either input or output buffers are queued. 
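  // Leaving |poll_device| false while nothing is queued keeps DevicePollTask()
  // blocked on the interrupt fd alone, so the poll thread sleeps instead of
  // spinning; Enqueue() wakes it via SetDevicePollInterrupt() when new
  // buffers arrive.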
if (input_buffer_queued_count_ + output_buffer_queued_count_ > 0) poll_device = true; // ServiceDeviceTask() should only ever be scheduled from DevicePollTask(), // so either: // * device_poll_thread_ is running normally // * device_poll_thread_ scheduled us, but then a ResetTask() or DestroyTask() // shut it down, in which case we're either in kResetting or kError states // respectively, and we should have early-outed already. DCHECK(device_poll_thread_.message_loop()); // Queue the DevicePollTask() now. device_poll_thread_.task_runner()->PostTask( FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DevicePollTask, base::Unretained(this), poll_device)); DVLOGF(3) << "ServiceDeviceTask(): buffer counts: DEC[" << decoder_input_queue_.size() << "->" << input_ready_queue_.size() << "] => DEVICE[" << free_input_buffers_.size() << "+" << input_buffer_queued_count_ << "/" << input_buffer_map_.size() << "->" << free_output_buffers_.size() << "+" << output_buffer_queued_count_ << "/" << output_buffer_map_.size() << "] => PROCESSOR[" << image_processor_bitstream_buffer_ids_.size() << "] => CLIENT[" << decoder_frames_at_client_ << "]"; ScheduleDecodeBufferTaskIfNeeded(); if (resolution_change_pending) StartResolutionChange(); } void V4L2VideoDecodeAccelerator::Enqueue() { DVLOGF(4); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK_NE(decoder_state_, kUninitialized); TRACE_EVENT0("media,gpu", "V4L2VDA::Enqueue"); // Drain the pipe of completed decode buffers. const int old_inputs_queued = input_buffer_queued_count_; while (!input_ready_queue_.empty()) { const int buffer = input_ready_queue_.front(); InputRecord& input_record = input_buffer_map_[buffer]; if (input_record.input_id == kFlushBufferId && decoder_cmd_supported_) { // Send the flush command after all input buffers are dequeued. This makes // sure all previous resolution changes have been handled because the // driver must hold the input buffer that triggers resolution change. The // driver cannot decode data in it without new output buffers. If we send // the flush now and a queued input buffer triggers resolution change // later, the driver will send an output buffer that has // V4L2_BUF_FLAG_LAST. But some queued input buffer have not been decoded // yet. Also, V4L2VDA calls STREAMOFF and STREAMON after resolution // change. They implicitly send a V4L2_DEC_CMD_STOP and V4L2_DEC_CMD_START // to the decoder. if (input_buffer_queued_count_ == 0) { input_ready_queue_.pop(); free_input_buffers_.push_back(buffer); input_record.input_id = -1; if (coded_size_.IsEmpty()) { // If coded_size_.IsEmpty(), no output buffer could have been // allocated and there is nothing to flush. We can NotifyFlushDone() // immediately, without requesting flush to the driver via // SendDecoderCmdStop(). NotifyFlushDoneIfNeeded(); } else if (!SendDecoderCmdStop()) { return; } } else { break; } } else if (!EnqueueInputRecord()) return; } if (old_inputs_queued == 0 && input_buffer_queued_count_ != 0) { // We just started up a previously empty queue. // Queue state changed; signal interrupt. if (!device_->SetDevicePollInterrupt()) { VPLOGF(1) << "SetDevicePollInterrupt failed"; NOTIFY_ERROR(PLATFORM_FAILURE); return; } // Start VIDIOC_STREAMON if we haven't yet. if (!input_streamon_) { __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type); input_streamon_ = true; } } // Enqueue all the outputs we can. 
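  // |free_output_buffers_| is consumed FIFO; EnqueueOutputRecord() also waits
  // out any EGL fence still attached to a record before the driver may write
  // into it again.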
const int old_outputs_queued = output_buffer_queued_count_; while (!free_output_buffers_.empty()) { if (!EnqueueOutputRecord()) return; } if (old_outputs_queued == 0 && output_buffer_queued_count_ != 0) { // We just started up a previously empty queue. // Queue state changed; signal interrupt. if (!device_->SetDevicePollInterrupt()) { VPLOGF(1) << "SetDevicePollInterrupt(): failed"; NOTIFY_ERROR(PLATFORM_FAILURE); return; } // Start VIDIOC_STREAMON if we haven't yet. if (!output_streamon_) { __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type); output_streamon_ = true; } } } bool V4L2VideoDecodeAccelerator::DequeueResolutionChangeEvent() { DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK_NE(decoder_state_, kUninitialized); DVLOGF(3); struct v4l2_event ev; memset(&ev, 0, sizeof(ev)); while (device_->Ioctl(VIDIOC_DQEVENT, &ev) == 0) { if (ev.type == V4L2_EVENT_SOURCE_CHANGE) { if (ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION) { VLOGF(2) << "got resolution change event."; return true; } } else { VLOGF(1) << "got an event (" << ev.type << ") we haven't subscribed to."; } } return false; } void V4L2VideoDecodeAccelerator::Dequeue() { DVLOGF(4); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK_NE(decoder_state_, kUninitialized); TRACE_EVENT0("media,gpu", "V4L2VDA::Dequeue"); while (input_buffer_queued_count_ > 0) { if (!DequeueInputBuffer()) break; } while (output_buffer_queued_count_ > 0) { if (!DequeueOutputBuffer()) break; } NotifyFlushDoneIfNeeded(); } bool V4L2VideoDecodeAccelerator::DequeueInputBuffer() { DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK_GT(input_buffer_queued_count_, 0); DCHECK(input_streamon_); // Dequeue a completed input (VIDEO_OUTPUT) buffer, and recycle to the free // list. struct v4l2_buffer dqbuf; struct v4l2_plane planes[1]; memset(&dqbuf, 0, sizeof(dqbuf)); memset(planes, 0, sizeof(planes)); dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; dqbuf.memory = V4L2_MEMORY_MMAP; dqbuf.m.planes = planes; dqbuf.length = 1; if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) { if (errno == EAGAIN) { // EAGAIN if we're just out of buffers to dequeue. return false; } VPLOGF(1) << "ioctl() failed: VIDIOC_DQBUF"; NOTIFY_ERROR(PLATFORM_FAILURE); return false; } InputRecord& input_record = input_buffer_map_[dqbuf.index]; DCHECK(input_record.at_device); free_input_buffers_.push_back(dqbuf.index); input_record.at_device = false; input_record.bytes_used = 0; input_record.input_id = -1; input_buffer_queued_count_--; return true; } bool V4L2VideoDecodeAccelerator::DequeueOutputBuffer() { DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK_GT(output_buffer_queued_count_, 0); DCHECK(output_streamon_); // Dequeue a completed output (VIDEO_CAPTURE) buffer, and queue to the // completed queue. struct v4l2_buffer dqbuf; std::unique_ptr<struct v4l2_plane[]> planes( new v4l2_plane[output_planes_count_]); memset(&dqbuf, 0, sizeof(dqbuf)); memset(planes.get(), 0, sizeof(struct v4l2_plane) * output_planes_count_); dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; dqbuf.memory = V4L2_MEMORY_MMAP; dqbuf.m.planes = planes.get(); dqbuf.length = output_planes_count_; if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) { if (errno == EAGAIN) { // EAGAIN if we're just out of buffers to dequeue. return false; } else if (errno == EPIPE) { DVLOGF(3) << "Got EPIPE. 
Last output buffer was already dequeued."; return false; } VPLOGF(1) << "ioctl() failed: VIDIOC_DQBUF"; NOTIFY_ERROR(PLATFORM_FAILURE); return false; } OutputRecord& output_record = output_buffer_map_[dqbuf.index]; DCHECK_EQ(output_record.state, kAtDevice); DCHECK_NE(output_record.picture_id, -1); output_buffer_queued_count_--; if (dqbuf.m.planes[0].bytesused == 0) { // This is an empty output buffer returned as part of a flush. output_record.state = kFree; free_output_buffers_.push_back(dqbuf.index); } else { int32_t bitstream_buffer_id = dqbuf.timestamp.tv_sec; DCHECK_GE(bitstream_buffer_id, 0); DVLOGF(4) << "Dequeue output buffer: dqbuf index=" << dqbuf.index << " bitstream input_id=" << bitstream_buffer_id; if (image_processor_device_) { if (!ProcessFrame(bitstream_buffer_id, dqbuf.index)) { VLOGF(1) << "Processing frame failed"; NOTIFY_ERROR(PLATFORM_FAILURE); return false; } } else { output_record.state = kAtClient; decoder_frames_at_client_++; // TODO(hubbe): Insert correct color space. http://crbug.com/647725 const Picture picture(output_record.picture_id, bitstream_buffer_id, gfx::Rect(visible_size_), gfx::ColorSpace(), false); pending_picture_ready_.push( PictureRecord(output_record.cleared, picture)); SendPictureReady(); output_record.cleared = true; } } if (dqbuf.flags & V4L2_BUF_FLAG_LAST) { DVLOGF(3) << "Got last output buffer. Waiting last buffer=" << flush_awaiting_last_output_buffer_; if (flush_awaiting_last_output_buffer_) { flush_awaiting_last_output_buffer_ = false; struct v4l2_decoder_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.cmd = V4L2_DEC_CMD_START; IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_DECODER_CMD, &cmd); } } return true; } bool V4L2VideoDecodeAccelerator::EnqueueInputRecord() { DVLOGF(4); DCHECK(!input_ready_queue_.empty()); // Enqueue an input (VIDEO_OUTPUT) buffer. const int buffer = input_ready_queue_.front(); InputRecord& input_record = input_buffer_map_[buffer]; DCHECK(!input_record.at_device); struct v4l2_buffer qbuf; struct v4l2_plane qbuf_plane; memset(&qbuf, 0, sizeof(qbuf)); memset(&qbuf_plane, 0, sizeof(qbuf_plane)); qbuf.index = buffer; qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; qbuf.timestamp.tv_sec = input_record.input_id; qbuf.memory = V4L2_MEMORY_MMAP; qbuf.m.planes = &qbuf_plane; qbuf.m.planes[0].bytesused = input_record.bytes_used; qbuf.length = 1; IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf); input_ready_queue_.pop(); input_record.at_device = true; input_buffer_queued_count_++; DVLOGF(4) << "enqueued input_id=" << input_record.input_id << " size=" << input_record.bytes_used; return true; } bool V4L2VideoDecodeAccelerator::EnqueueOutputRecord() { DCHECK(!free_output_buffers_.empty()); // Enqueue an output (VIDEO_CAPTURE) buffer. const int buffer = free_output_buffers_.front(); DVLOGF(4) << "buffer " << buffer; OutputRecord& output_record = output_buffer_map_[buffer]; DCHECK_EQ(output_record.state, kFree); DCHECK_NE(output_record.picture_id, -1); if (output_record.egl_sync != EGL_NO_SYNC_KHR) { TRACE_EVENT0("media,gpu", "V4L2VDA::EnqueueOutputRecord: eglClientWaitSyncKHR"); // If we have to wait for completion, wait. Note that // free_output_buffers_ is a FIFO queue, so we always wait on the // buffer that has been in the queue the longest. if (eglClientWaitSyncKHR(egl_display_, output_record.egl_sync, 0, EGL_FOREVER_KHR) == EGL_FALSE) { // This will cause tearing, but is safe otherwise. 
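      // (Deliberately non-fatal: a missed fence wait can corrupt at most one
      // frame, whereas failing here would tear down the whole decoder.)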
DVLOGF(1) << "eglClientWaitSyncKHR failed!"; } if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE) { VLOGF(1) << "eglDestroySyncKHR failed!"; NOTIFY_ERROR(PLATFORM_FAILURE); return false; } output_record.egl_sync = EGL_NO_SYNC_KHR; } struct v4l2_buffer qbuf; std::unique_ptr<struct v4l2_plane[]> qbuf_planes( new v4l2_plane[output_planes_count_]); memset(&qbuf, 0, sizeof(qbuf)); memset(qbuf_planes.get(), 0, sizeof(struct v4l2_plane) * output_planes_count_); qbuf.index = buffer; qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; qbuf.memory = V4L2_MEMORY_MMAP; qbuf.m.planes = qbuf_planes.get(); qbuf.length = output_planes_count_; DVLOGF(4) << "qbuf.index=" << qbuf.index; IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf); free_output_buffers_.pop_front(); output_record.state = kAtDevice; output_buffer_queued_count_++; return true; } void V4L2VideoDecodeAccelerator::ReusePictureBufferTask( int32_t picture_buffer_id, std::unique_ptr<EGLSyncKHRRef> egl_sync_ref) { DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id; DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); TRACE_EVENT0("media,gpu", "V4L2VDA::ReusePictureBufferTask"); // We run ReusePictureBufferTask even if we're in kResetting. if (decoder_state_ == kError) { DVLOGF(4) << "early out: kError state"; return; } if (decoder_state_ == kChangingResolution) { DVLOGF(4) << "early out: kChangingResolution"; return; } size_t index; for (index = 0; index < output_buffer_map_.size(); ++index) if (output_buffer_map_[index].picture_id == picture_buffer_id) break; if (index >= output_buffer_map_.size()) { // It's possible that we've already posted a DismissPictureBuffer for this // picture, but it has not yet executed when this ReusePictureBuffer was // posted to us by the client. In that case just ignore this (we've already // dismissed it and accounted for that) and let the sync object get // destroyed. DVLOGF(3) << "got picture id= " << picture_buffer_id << " not in use (anymore?)."; return; } OutputRecord& output_record = output_buffer_map_[index]; if (output_record.state != kAtClient) { VLOGF(1) << "picture_buffer_id not reusable"; NOTIFY_ERROR(INVALID_ARGUMENT); return; } DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); output_record.state = kFree; free_output_buffers_.push_back(index); decoder_frames_at_client_--; if (egl_sync_ref) { output_record.egl_sync = egl_sync_ref->egl_sync; // Take ownership of the EGLSync. egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR; } // We got a buffer back, so enqueue it back. Enqueue(); } void V4L2VideoDecodeAccelerator::FlushTask() { VLOGF(2); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); TRACE_EVENT0("media,gpu", "V4L2VDA::FlushTask"); if (decoder_state_ == kError) { VLOGF(2) << "early out: kError state"; return; } // We don't support stacked flushing. DCHECK(!decoder_flushing_); // Queue up an empty buffer -- this triggers the flush. decoder_input_queue_.push(std::make_unique<BitstreamBufferRef>( decode_client_, decode_task_runner_, nullptr, kFlushBufferId)); decoder_flushing_ = true; SendPictureReady(); // Send all pending PictureReady. ScheduleDecodeBufferTaskIfNeeded(); } void V4L2VideoDecodeAccelerator::NotifyFlushDoneIfNeeded() { DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); if (!decoder_flushing_) return; // Pipeline is empty when: // * Decoder input queue is empty of non-delayed buffers. // * There is no currently filling input buffer. // * Input holding queue is empty. // * All input (VIDEO_OUTPUT) buffers are returned. 
  // * All image processor buffers are returned.
  if (!decoder_input_queue_.empty()) {
    if (decoder_input_queue_.front()->input_id !=
        decoder_delay_bitstream_buffer_id_) {
      DVLOGF(3) << "Some input bitstream buffers are not queued.";
      return;
    }
  }
  if (decoder_current_input_buffer_ != -1) {
    DVLOGF(3) << "Current input buffer != -1";
    return;
  }
  if ((input_ready_queue_.size() + input_buffer_queued_count_) != 0) {
    DVLOGF(3) << "Some input buffers are not dequeued.";
    return;
  }
  if (image_processor_bitstream_buffer_ids_.size() != 0) {
    DVLOGF(3) << "Waiting for image processor to complete.";
    return;
  }
  if (flush_awaiting_last_output_buffer_) {
    DVLOGF(3) << "Waiting for last output buffer.";
    return;
  }

  // TODO(posciak): https://crbug.com/270039. Exynos requires a
  // streamoff-streamon sequence after flush to continue, even if we are not
  // resetting. This would make sense, because we don't really want to resume
  // from a non-resume point (e.g. not from an IDR) if we are flushed.
  // MSE player however triggers a Flush() on chunk end, but never Reset(). One
  // could argue either way, or even say that Flush() is not needed/harmful
  // when transitioning to next chunk.
  // For now, do the streamoff-streamon cycle to satisfy Exynos and not freeze
  // when doing MSE. This should be harmless otherwise.
  if (!(StopDevicePoll() && StopOutputStream() && StopInputStream()))
    return;

  if (!StartDevicePoll())
    return;

  decoder_delay_bitstream_buffer_id_ = -1;
  decoder_flushing_ = false;
  VLOGF(2) << "returning flush";
  child_task_runner_->PostTask(FROM_HERE,
                               base::Bind(&Client::NotifyFlushDone, client_));

  // While we were flushing, we early-outed DecodeBufferTask()s.
  ScheduleDecodeBufferTaskIfNeeded();
}

bool V4L2VideoDecodeAccelerator::IsDecoderCmdSupported() {
  // CMD_STOP should always succeed. If the decoder is started, the command can
  // flush it. If the decoder is stopped, the command does nothing. We use this
  // to know if a driver supports V4L2_DEC_CMD_STOP to flush.
  struct v4l2_decoder_cmd cmd;
  memset(&cmd, 0, sizeof(cmd));
  cmd.cmd = V4L2_DEC_CMD_STOP;
  if (device_->Ioctl(VIDIOC_TRY_DECODER_CMD, &cmd) != 0) {
    VLOGF(2) << "V4L2_DEC_CMD_STOP is not supported.";
    return false;
  }
  return true;
}

bool V4L2VideoDecodeAccelerator::SendDecoderCmdStop() {
  VLOGF(2);
  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
  DCHECK(!flush_awaiting_last_output_buffer_);

  struct v4l2_decoder_cmd cmd;
  memset(&cmd, 0, sizeof(cmd));
  cmd.cmd = V4L2_DEC_CMD_STOP;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_DECODER_CMD, &cmd);
  flush_awaiting_last_output_buffer_ = true;

  return true;
}

void V4L2VideoDecodeAccelerator::ResetTask() {
  VLOGF(2);
  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
  TRACE_EVENT0("media,gpu", "V4L2VDA::ResetTask");

  if (decoder_state_ == kError) {
    VLOGF(2) << "early out: kError state";
    return;
  }
  decoder_current_bitstream_buffer_.reset();
  while (!decoder_input_queue_.empty())
    decoder_input_queue_.pop();

  decoder_current_input_buffer_ = -1;

  // If we are in the middle of switching resolutions or awaiting picture
  // buffers, postpone reset until it's done. We don't have to worry about
  // timing of this with respect to decoding, because the output pipe is
  // already stopped if we are changing resolution. We will come back here
  // after we are done.
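  // A rough sketch of the deferred-reset flow when a reset lands mid
  // resolution change:
  //   ResetTask()                      -> reset_pending_ = true, return
  //   ...resolution change finishes...
  //   AssignPictureBuffersTask() /
  //   ImportBufferForPictureTask()     -> FinishReset()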
DCHECK(!reset_pending_); if (decoder_state_ == kChangingResolution || decoder_state_ == kAwaitingPictureBuffers) { reset_pending_ = true; return; } FinishReset(); } void V4L2VideoDecodeAccelerator::FinishReset() { VLOGF(2); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); reset_pending_ = false; // After the output stream is stopped, the codec should not post any // resolution change events. So we dequeue the resolution change event // afterwards. The event could be posted before or while stopping the output // stream. The codec will expect the buffer of new size after the seek, so // we need to handle the resolution change event first. if (!(StopDevicePoll() && StopOutputStream())) return; if (DequeueResolutionChangeEvent()) { reset_pending_ = true; StartResolutionChange(); return; } if (!StopInputStream()) return; // Drop all buffers in image processor. if (image_processor_ && !ResetImageProcessor()) { VLOGF(1) << "Fail to reset image processor"; NOTIFY_ERROR(PLATFORM_FAILURE); return; } // If we were flushing, we'll never return any more BitstreamBuffers or // PictureBuffers; they have all been dropped and returned by now. NotifyFlushDoneIfNeeded(); // Mark that we're resetting, then enqueue a ResetDoneTask(). All intervening // jobs will early-out in the kResetting state. decoder_state_ = kResetting; SendPictureReady(); // Send all pending PictureReady. decoder_thread_.task_runner()->PostTask( FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ResetDoneTask, base::Unretained(this))); } void V4L2VideoDecodeAccelerator::ResetDoneTask() { VLOGF(2); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); TRACE_EVENT0("media,gpu", "V4L2VDA::ResetDoneTask"); if (decoder_state_ == kError) { VLOGF(2) << "early out: kError state"; return; } // Start poll thread if NotifyFlushDoneIfNeeded has not already. if (!device_poll_thread_.IsRunning()) { if (!StartDevicePoll()) return; } // Reset format-specific bits. if (video_profile_ >= H264PROFILE_MIN && video_profile_ <= H264PROFILE_MAX) { decoder_h264_parser_.reset(new H264Parser()); } // Jobs drained, we're finished resetting. DCHECK_EQ(decoder_state_, kResetting); decoder_state_ = kInitialized; decoder_partial_frame_pending_ = false; decoder_delay_bitstream_buffer_id_ = -1; child_task_runner_->PostTask(FROM_HERE, base::Bind(&Client::NotifyResetDone, client_)); // While we were resetting, we early-outed DecodeBufferTask()s. ScheduleDecodeBufferTaskIfNeeded(); } void V4L2VideoDecodeAccelerator::DestroyTask() { VLOGF(2); TRACE_EVENT0("media,gpu", "V4L2VDA::DestroyTask"); // DestroyTask() should run regardless of decoder_state_. StopDevicePoll(); StopOutputStream(); StopInputStream(); decoder_current_bitstream_buffer_.reset(); decoder_current_input_buffer_ = -1; decoder_decode_buffer_tasks_scheduled_ = 0; decoder_frames_at_client_ = 0; while (!decoder_input_queue_.empty()) decoder_input_queue_.pop(); decoder_flushing_ = false; if (image_processor_) image_processor_.release()->Destroy(); // Set our state to kError. Just in case. decoder_state_ = kError; DestroyInputBuffers(); DestroyOutputBuffers(); } bool V4L2VideoDecodeAccelerator::StartDevicePoll() { DVLOGF(3); DCHECK(!device_poll_thread_.IsRunning()); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); // Start up the device poll thread and schedule its first DevicePollTask(). 
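  // The first DevicePollTask() is posted with poll_device == false (the
  // literal 0 below): until buffers are actually queued there is nothing to
  // poll for except the interrupt fd.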
if (!device_poll_thread_.Start()) { VLOGF(1) << "Device thread failed to start"; NOTIFY_ERROR(PLATFORM_FAILURE); return false; } device_poll_thread_.task_runner()->PostTask( FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DevicePollTask, base::Unretained(this), 0)); return true; } bool V4L2VideoDecodeAccelerator::StopDevicePoll() { DVLOGF(3); if (!device_poll_thread_.IsRunning()) return true; if (decoder_thread_.IsRunning()) DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); // Signal the DevicePollTask() to stop, and stop the device poll thread. if (!device_->SetDevicePollInterrupt()) { VPLOGF(1) << "SetDevicePollInterrupt(): failed"; NOTIFY_ERROR(PLATFORM_FAILURE); return false; } device_poll_thread_.Stop(); // Clear the interrupt now, to be sure. if (!device_->ClearDevicePollInterrupt()) { NOTIFY_ERROR(PLATFORM_FAILURE); return false; } DVLOGF(3) << "device poll stopped"; return true; } bool V4L2VideoDecodeAccelerator::StopOutputStream() { VLOGF(2); if (!output_streamon_) return true; __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type); output_streamon_ = false; // Output stream is stopped. No need to wait for the buffer anymore. flush_awaiting_last_output_buffer_ = false; for (size_t i = 0; i < output_buffer_map_.size(); ++i) { // After streamoff, the device drops ownership of all buffers, even if we // don't dequeue them explicitly. Some of them may still be owned by the // client however. Reuse only those that aren't. OutputRecord& output_record = output_buffer_map_[i]; if (output_record.state == kAtDevice) { output_record.state = kFree; free_output_buffers_.push_back(i); DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); } } output_buffer_queued_count_ = 0; return true; } bool V4L2VideoDecodeAccelerator::StopInputStream() { VLOGF(2); if (!input_streamon_) return true; __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type); input_streamon_ = false; // Reset accounting info for input. while (!input_ready_queue_.empty()) input_ready_queue_.pop(); free_input_buffers_.clear(); for (size_t i = 0; i < input_buffer_map_.size(); ++i) { free_input_buffers_.push_back(i); input_buffer_map_[i].at_device = false; input_buffer_map_[i].bytes_used = 0; input_buffer_map_[i].input_id = -1; } input_buffer_queued_count_ = 0; return true; } void V4L2VideoDecodeAccelerator::StartResolutionChange() { DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK_NE(decoder_state_, kUninitialized); DCHECK_NE(decoder_state_, kResetting); VLOGF(2) << "Initiate resolution change"; if (!(StopDevicePoll() && StopOutputStream())) return; decoder_state_ = kChangingResolution; SendPictureReady(); // Send all pending PictureReady. 
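  // From this point the resolution change can stall on purpose: if the image
  // processor still holds frames we return just below and remain in
  // kChangingResolution until its queue drains.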
if (!image_processor_bitstream_buffer_ids_.empty()) { VLOGF(2) << "Wait image processor to finish before destroying buffers."; return; } if (image_processor_) image_processor_.release()->Destroy(); if (!DestroyOutputBuffers()) { VLOGF(1) << "Failed destroying output buffers."; NOTIFY_ERROR(PLATFORM_FAILURE); return; } FinishResolutionChange(); } void V4L2VideoDecodeAccelerator::FinishResolutionChange() { DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK_EQ(decoder_state_, kChangingResolution); VLOGF(2); if (decoder_state_ == kError) { VLOGF(2) << "early out: kError state"; return; } struct v4l2_format format; bool again; gfx::Size visible_size; bool ret = GetFormatInfo(&format, &visible_size, &again); if (!ret || again) { VLOGF(1) << "Couldn't get format information after resolution change"; NOTIFY_ERROR(PLATFORM_FAILURE); return; } if (!CreateBuffersForFormat(format, visible_size)) { VLOGF(1) << "Couldn't reallocate buffers after resolution change"; NOTIFY_ERROR(PLATFORM_FAILURE); return; } if (!StartDevicePoll()) return; } void V4L2VideoDecodeAccelerator::DevicePollTask(bool poll_device) { DVLOGF(4); DCHECK(device_poll_thread_.task_runner()->BelongsToCurrentThread()); TRACE_EVENT0("media,gpu", "V4L2VDA::DevicePollTask"); bool event_pending = false; if (!device_->Poll(poll_device, &event_pending)) { NOTIFY_ERROR(PLATFORM_FAILURE); return; } // All processing should happen on ServiceDeviceTask(), since we shouldn't // touch decoder state from this thread. decoder_thread_.task_runner()->PostTask( FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ServiceDeviceTask, base::Unretained(this), event_pending)); } void V4L2VideoDecodeAccelerator::NotifyError(Error error) { VLOGF(1); if (!child_task_runner_->BelongsToCurrentThread()) { child_task_runner_->PostTask( FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::NotifyError, weak_this_, error)); return; } if (client_) { client_->NotifyError(error); client_ptr_factory_.reset(); } } void V4L2VideoDecodeAccelerator::SetErrorState(Error error) { // We can touch decoder_state_ only if this is the decoder thread or the // decoder thread isn't running. if (decoder_thread_.task_runner() && !decoder_thread_.task_runner()->BelongsToCurrentThread()) { decoder_thread_.task_runner()->PostTask( FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::SetErrorState, base::Unretained(this), error)); return; } // Post NotifyError only if we are already initialized, as the API does // not allow doing so before that. if (decoder_state_ != kError && decoder_state_ != kUninitialized) NotifyError(error); decoder_state_ = kError; } bool V4L2VideoDecodeAccelerator::GetFormatInfo(struct v4l2_format* format, gfx::Size* visible_size, bool* again) { DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); *again = false; memset(format, 0, sizeof(*format)); format->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; if (device_->Ioctl(VIDIOC_G_FMT, format) != 0) { if (errno == EINVAL) { // EINVAL means we haven't seen sufficient stream to decode the format. *again = true; return true; } else { VPLOGF(1) << "ioctl() failed: VIDIOC_G_FMT"; NOTIFY_ERROR(PLATFORM_FAILURE); return false; } } // Make sure we are still getting the format we set on initialization. 
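  // A silent pixelformat switch by the driver would invalidate every stride
  // and size assumption made when the output buffers were negotiated, so
  // treat it as an error rather than adapting.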
if (format->fmt.pix_mp.pixelformat != output_format_fourcc_) { VLOGF(1) << "Unexpected format from G_FMT on output"; return false; } gfx::Size coded_size(format->fmt.pix_mp.width, format->fmt.pix_mp.height); if (visible_size != nullptr) *visible_size = GetVisibleSize(coded_size); return true; } bool V4L2VideoDecodeAccelerator::CreateBuffersForFormat( const struct v4l2_format& format, const gfx::Size& visible_size) { DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); output_planes_count_ = format.fmt.pix_mp.num_planes; coded_size_.SetSize(format.fmt.pix_mp.width, format.fmt.pix_mp.height); visible_size_ = visible_size; if (image_processor_device_) { egl_image_size_ = visible_size_; egl_image_planes_count_ = 0; if (!V4L2ImageProcessor::TryOutputFormat( output_format_fourcc_, egl_image_format_fourcc_, &egl_image_size_, &egl_image_planes_count_)) { VLOGF(1) << "Fail to get output size and plane count of processor"; return false; } } else { egl_image_size_ = coded_size_; egl_image_planes_count_ = output_planes_count_; } VLOGF(2) << "new resolution: " << coded_size_.ToString() << ", visible size: " << visible_size_.ToString() << ", decoder output planes count: " << output_planes_count_ << ", EGLImage size: " << egl_image_size_.ToString() << ", EGLImage plane count: " << egl_image_planes_count_; return CreateOutputBuffers(); } gfx::Size V4L2VideoDecodeAccelerator::GetVisibleSize( const gfx::Size& coded_size) { DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); struct v4l2_rect* visible_rect; struct v4l2_selection selection_arg; memset(&selection_arg, 0, sizeof(selection_arg)); selection_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; selection_arg.target = V4L2_SEL_TGT_COMPOSE; if (device_->Ioctl(VIDIOC_G_SELECTION, &selection_arg) == 0) { VLOGF(2) << "VIDIOC_G_SELECTION is supported"; visible_rect = &selection_arg.r; } else { VLOGF(2) << "Fallback to VIDIOC_G_CROP"; struct v4l2_crop crop_arg; memset(&crop_arg, 0, sizeof(crop_arg)); crop_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; if (device_->Ioctl(VIDIOC_G_CROP, &crop_arg) != 0) { VPLOGF(1) << "ioctl() VIDIOC_G_CROP failed"; return coded_size; } visible_rect = &crop_arg.c; } gfx::Rect rect(visible_rect->left, visible_rect->top, visible_rect->width, visible_rect->height); VLOGF(2) << "visible rectangle is " << rect.ToString(); if (!gfx::Rect(coded_size).Contains(rect)) { DVLOGF(3) << "visible rectangle " << rect.ToString() << " is not inside coded size " << coded_size.ToString(); return coded_size; } if (rect.IsEmpty()) { VLOGF(1) << "visible size is empty"; return coded_size; } // Chrome assume picture frame is coded at (0, 0). if (!rect.origin().IsOrigin()) { VLOGF(1) << "Unexpected visible rectangle " << rect.ToString() << ", top-left is not origin"; return coded_size; } return rect.size(); } bool V4L2VideoDecodeAccelerator::CreateInputBuffers() { VLOGF(2); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); // We always run this as we prepare to initialize. DCHECK_EQ(decoder_state_, kInitialized); DCHECK(!input_streamon_); DCHECK(input_buffer_map_.empty()); struct v4l2_requestbuffers reqbufs; memset(&reqbufs, 0, sizeof(reqbufs)); reqbufs.count = kInputBufferCount; reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; reqbufs.memory = V4L2_MEMORY_MMAP; IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs); input_buffer_map_.resize(reqbufs.count); for (size_t i = 0; i < input_buffer_map_.size(); ++i) { free_input_buffers_.push_back(i); // Query for the MEMORY_MMAP pointer. 
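    // VIDIOC_QUERYBUF fills in the per-plane length and mem_offset, which are
    // exactly the arguments mmap() needs; bitstream input is single-planar,
    // hence the one-element |planes| array.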
struct v4l2_plane planes[1]; struct v4l2_buffer buffer; memset(&buffer, 0, sizeof(buffer)); memset(planes, 0, sizeof(planes)); buffer.index = i; buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; buffer.memory = V4L2_MEMORY_MMAP; buffer.m.planes = planes; buffer.length = 1; IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer); void* address = device_->Mmap(NULL, buffer.m.planes[0].length, PROT_READ | PROT_WRITE, MAP_SHARED, buffer.m.planes[0].m.mem_offset); if (address == MAP_FAILED) { VPLOGF(1) << "mmap() failed"; return false; } input_buffer_map_[i].address = address; input_buffer_map_[i].length = buffer.m.planes[0].length; } return true; } bool V4L2VideoDecodeAccelerator::SetupFormats() { // We always run this as we prepare to initialize. DCHECK(child_task_runner_->BelongsToCurrentThread()); DCHECK_EQ(decoder_state_, kUninitialized); DCHECK(!input_streamon_); DCHECK(!output_streamon_); size_t input_size; gfx::Size max_resolution, min_resolution; device_->GetSupportedResolution(input_format_fourcc_, &min_resolution, &max_resolution); if (max_resolution.width() > 1920 && max_resolution.height() > 1088) input_size = kInputBufferMaxSizeFor4k; else input_size = kInputBufferMaxSizeFor1080p; struct v4l2_fmtdesc fmtdesc; memset(&fmtdesc, 0, sizeof(fmtdesc)); fmtdesc.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; bool is_format_supported = false; while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) { if (fmtdesc.pixelformat == input_format_fourcc_) { is_format_supported = true; break; } ++fmtdesc.index; } if (!is_format_supported) { VLOGF(1) << "Input fourcc " << input_format_fourcc_ << " not supported by device."; return false; } struct v4l2_format format; memset(&format, 0, sizeof(format)); format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; format.fmt.pix_mp.pixelformat = input_format_fourcc_; format.fmt.pix_mp.plane_fmt[0].sizeimage = input_size; format.fmt.pix_mp.num_planes = 1; IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format); // We have to set up the format for output, because the driver may not allow // changing it once we start streaming; whether it can support our chosen // output format or not may depend on the input format. memset(&fmtdesc, 0, sizeof(fmtdesc)); fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) { if (device_->CanCreateEGLImageFrom(fmtdesc.pixelformat)) { output_format_fourcc_ = fmtdesc.pixelformat; break; } ++fmtdesc.index; } DCHECK(!image_processor_device_); if (output_format_fourcc_ == 0) { VLOGF(2) << "Could not find a usable output format. 
Try image processor"; if (!V4L2ImageProcessor::IsSupported()) { VLOGF(1) << "Image processor not available"; return false; } output_format_fourcc_ = FindImageProcessorInputFormat(); if (output_format_fourcc_ == 0) { VLOGF(1) << "Can't find a usable input format from image processor"; return false; } egl_image_format_fourcc_ = FindImageProcessorOutputFormat(); if (egl_image_format_fourcc_ == 0) { VLOGF(1) << "Can't find a usable output format from image processor"; return false; } image_processor_device_ = V4L2Device::Create(); if (!image_processor_device_) { VLOGF(1) << "Could not create a V4L2Device for image processor"; return false; } egl_image_device_ = image_processor_device_; } else { if (output_mode_ == Config::OutputMode::IMPORT) { VLOGF(1) << "Import mode without image processor is not implemented " << "yet."; return false; } egl_image_format_fourcc_ = output_format_fourcc_; egl_image_device_ = device_; } VLOGF(2) << "Output format=" << output_format_fourcc_; // Just set the fourcc for output; resolution, etc., will come from the // driver once it extracts it from the stream. memset(&format, 0, sizeof(format)); format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; format.fmt.pix_mp.pixelformat = output_format_fourcc_; IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format); return true; } uint32_t V4L2VideoDecodeAccelerator::FindImageProcessorInputFormat() { std::vector<uint32_t> processor_input_formats = V4L2ImageProcessor::GetSupportedInputFormats(); struct v4l2_fmtdesc fmtdesc; memset(&fmtdesc, 0, sizeof(fmtdesc)); fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) { if (std::find(processor_input_formats.begin(), processor_input_formats.end(), fmtdesc.pixelformat) != processor_input_formats.end()) { VLOGF(2) << "Image processor input format=" << fmtdesc.description; return fmtdesc.pixelformat; } ++fmtdesc.index; } return 0; } uint32_t V4L2VideoDecodeAccelerator::FindImageProcessorOutputFormat() { // Prefer YVU420 and NV12 because ArcGpuVideoDecodeAccelerator only supports // single physical plane. Prefer YVU420 over NV12 because chrome rendering // supports YV12 only. static const uint32_t kPreferredFormats[] = {V4L2_PIX_FMT_YVU420, V4L2_PIX_FMT_NV12}; auto preferred_formats_first = [](uint32_t a, uint32_t b) -> bool { auto* iter_a = std::find(std::begin(kPreferredFormats), std::end(kPreferredFormats), a); auto* iter_b = std::find(std::begin(kPreferredFormats), std::end(kPreferredFormats), b); return iter_a < iter_b; }; std::vector<uint32_t> processor_output_formats = V4L2ImageProcessor::GetSupportedOutputFormats(); // Move the preferred formats to the front. 
std::sort(processor_output_formats.begin(), processor_output_formats.end(),
            preferred_formats_first);

  for (uint32_t processor_output_format : processor_output_formats) {
    if (device_->CanCreateEGLImageFrom(processor_output_format)) {
      VLOGF(2) << "Image processor output format=" << processor_output_format;
      return processor_output_format;
    }
  }

  return 0;
}

bool V4L2VideoDecodeAccelerator::ResetImageProcessor() {
  VLOGF(2);
  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());

  if (!image_processor_->Reset())
    return false;
  for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
    OutputRecord& output_record = output_buffer_map_[i];
    if (output_record.state == kAtProcessor) {
      output_record.state = kFree;
      free_output_buffers_.push_back(i);
    }
  }
  while (!image_processor_bitstream_buffer_ids_.empty())
    image_processor_bitstream_buffer_ids_.pop();

  return true;
}

bool V4L2VideoDecodeAccelerator::CreateImageProcessor() {
  VLOGF(2);
  DCHECK(!image_processor_);
  image_processor_.reset(new V4L2ImageProcessor(image_processor_device_));
  v4l2_memory output_memory_type =
      (output_mode_ == Config::OutputMode::ALLOCATE ? V4L2_MEMORY_MMAP
                                                    : V4L2_MEMORY_DMABUF);
  // Unretained is safe because |this| owns the image processor and there will
  // be no callbacks after the processor is destroyed.
  if (!image_processor_->Initialize(
          V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_),
          V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_),
          V4L2_MEMORY_DMABUF, output_memory_type, visible_size_, coded_size_,
          visible_size_, egl_image_size_, output_buffer_map_.size(),
          base::Bind(&V4L2VideoDecodeAccelerator::ImageProcessorError,
                     base::Unretained(this)))) {
    VLOGF(1) << "Initialize image processor failed";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return false;
  }
  VLOGF(2) << "image_processor_->output_allocated_size()="
           << image_processor_->output_allocated_size().ToString();
  DCHECK(image_processor_->output_allocated_size() == egl_image_size_);
  if (image_processor_->input_allocated_size() != coded_size_) {
    VLOGF(1) << "Image processor should be able to take the output coded "
             << "size of decoder " << coded_size_.ToString()
             << " without adjusting to "
             << image_processor_->input_allocated_size().ToString();
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return false;
  }
  return true;
}

bool V4L2VideoDecodeAccelerator::ProcessFrame(int32_t bitstream_buffer_id,
                                              int output_buffer_index) {
  DVLOGF(4);
  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());

  OutputRecord& output_record = output_buffer_map_[output_buffer_index];
  DCHECK_EQ(output_record.state, kAtDevice);
  output_record.state = kAtProcessor;
  image_processor_bitstream_buffer_ids_.push(bitstream_buffer_id);

  std::vector<int> processor_input_fds;
  for (auto& fd : output_record.processor_input_fds) {
    processor_input_fds.push_back(fd.get());
  }
  scoped_refptr<VideoFrame> input_frame = VideoFrame::WrapExternalDmabufs(
      V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_),
      coded_size_, gfx::Rect(visible_size_), visible_size_,
      processor_input_fds, base::TimeDelta());

  std::vector<base::ScopedFD> processor_output_fds;
  if (output_mode_ == Config::OutputMode::IMPORT) {
    for (auto& fd : output_record.processor_output_fds) {
      processor_output_fds.push_back(
          base::ScopedFD(HANDLE_EINTR(dup(fd.get()))));
      if (!processor_output_fds.back().is_valid()) {
        VPLOGF(1) << "Failed duplicating a dmabuf fd";
        return false;
      }
    }
  }
  // Unretained is safe because |this| owns the image processor and there will
  // be no callbacks after the processor is destroyed.
image_processor_->Process( input_frame, output_buffer_index, std::move(processor_output_fds), base::Bind(&V4L2VideoDecodeAccelerator::FrameProcessed, base::Unretained(this), bitstream_buffer_id)); return true; } bool V4L2VideoDecodeAccelerator::CreateOutputBuffers() { VLOGF(2); DCHECK(decoder_state_ == kInitialized || decoder_state_ == kChangingResolution); DCHECK(!output_streamon_); DCHECK(output_buffer_map_.empty()); // Number of output buffers we need. struct v4l2_control ctrl; memset(&ctrl, 0, sizeof(ctrl)); ctrl.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE; IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_G_CTRL, &ctrl); output_dpb_size_ = ctrl.value; // Output format setup in Initialize(). uint32_t buffer_count = output_dpb_size_ + kDpbOutputBufferExtraCount; if (image_processor_device_) buffer_count += kDpbOutputBufferExtraCountForImageProcessor; DVLOGF(3) << "buffer_count=" << buffer_count << ", coded_size=" << egl_image_size_.ToString(); // With ALLOCATE mode the client can sample it as RGB and doesn't need to // know the precise format. VideoPixelFormat pixel_format = (output_mode_ == Config::OutputMode::IMPORT) ? V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_) : PIXEL_FORMAT_UNKNOWN; child_task_runner_->PostTask( FROM_HERE, base::Bind(&Client::ProvidePictureBuffers, client_, buffer_count, pixel_format, 1, egl_image_size_, device_->GetTextureTarget())); // Go into kAwaitingPictureBuffers to prevent us from doing any more decoding // or event handling while we are waiting for AssignPictureBuffers(). Not // having Pictures available would not have prevented us from making decoding // progress entirely e.g. in the case of H.264 where we could further decode // non-slice NALUs and could even get another resolution change before we were // done with this one. After we get the buffers, we'll go back into kIdle and // kick off further event processing, and eventually go back into kDecoding // once no more events are pending (if any). 
decoder_state_ = kAwaitingPictureBuffers; return true; } void V4L2VideoDecodeAccelerator::DestroyInputBuffers() { VLOGF(2); DCHECK(!decoder_thread_.IsRunning() || decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK(!input_streamon_); if (input_buffer_map_.empty()) return; for (size_t i = 0; i < input_buffer_map_.size(); ++i) { if (input_buffer_map_[i].address != NULL) { device_->Munmap(input_buffer_map_[i].address, input_buffer_map_[i].length); } } struct v4l2_requestbuffers reqbufs; memset(&reqbufs, 0, sizeof(reqbufs)); reqbufs.count = 0; reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; reqbufs.memory = V4L2_MEMORY_MMAP; IOCTL_OR_LOG_ERROR(VIDIOC_REQBUFS, &reqbufs); input_buffer_map_.clear(); free_input_buffers_.clear(); } bool V4L2VideoDecodeAccelerator::DestroyOutputBuffers() { VLOGF(2); DCHECK(!decoder_thread_.IsRunning() || decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK(!output_streamon_); bool success = true; if (output_buffer_map_.empty()) return true; for (size_t i = 0; i < output_buffer_map_.size(); ++i) { OutputRecord& output_record = output_buffer_map_[i]; if (output_record.egl_image != EGL_NO_IMAGE_KHR) { child_task_runner_->PostTask( FROM_HERE, base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), device_, egl_display_, output_record.egl_image)); } if (output_record.egl_sync != EGL_NO_SYNC_KHR) { if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE) { VLOGF(1) << "eglDestroySyncKHR failed."; success = false; } } DVLOGF(3) << "dismissing PictureBuffer id=" << output_record.picture_id; child_task_runner_->PostTask( FROM_HERE, base::Bind(&Client::DismissPictureBuffer, client_, output_record.picture_id)); } struct v4l2_requestbuffers reqbufs; memset(&reqbufs, 0, sizeof(reqbufs)); reqbufs.count = 0; reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; reqbufs.memory = V4L2_MEMORY_MMAP; if (device_->Ioctl(VIDIOC_REQBUFS, &reqbufs) != 0) { VPLOGF(1) << "ioctl() failed: VIDIOC_REQBUFS"; NOTIFY_ERROR(PLATFORM_FAILURE); success = false; } output_buffer_map_.clear(); while (!free_output_buffers_.empty()) free_output_buffers_.pop_front(); output_buffer_queued_count_ = 0; // The client may still hold some buffers. The texture holds a reference to // the buffer. It is OK to free the buffer and destroy EGLImage here. decoder_frames_at_client_ = 0; return success; } void V4L2VideoDecodeAccelerator::SendPictureReady() { DVLOGF(4); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); bool send_now = (decoder_state_ == kChangingResolution || decoder_state_ == kResetting || decoder_flushing_); while (pending_picture_ready_.size() > 0) { bool cleared = pending_picture_ready_.front().cleared; const Picture& picture = pending_picture_ready_.front().picture; if (cleared && picture_clearing_count_ == 0) { // This picture is cleared. It can be posted to a thread different than // the main GPU thread to reduce latency. This should be the case after // all pictures are cleared at the beginning. decode_task_runner_->PostTask( FROM_HERE, base::Bind(&Client::PictureReady, decode_client_, picture)); pending_picture_ready_.pop(); } else if (!cleared || send_now) { DVLOGF(4) << "cleared=" << pending_picture_ready_.front().cleared << ", decoder_state_=" << decoder_state_ << ", decoder_flushing_=" << decoder_flushing_ << ", picture_clearing_count_=" << picture_clearing_count_; // If the picture is not cleared, post it to the child thread because it // has to be cleared in the child thread. A picture only needs to be // cleared once. 
If the decoder is changing resolution, resetting or
      // flushing, send all pictures to ensure that PictureReady arrives before
      // ProvidePictureBuffers, NotifyResetDone, or NotifyFlushDone.
      child_task_runner_->PostTaskAndReply(
          FROM_HERE, base::Bind(&Client::PictureReady, client_, picture),
          // Unretained is safe. If Client::PictureReady gets to run, |this| is
          // alive. Destroy() will wait for the decode thread to finish.
          base::Bind(&V4L2VideoDecodeAccelerator::PictureCleared,
                     base::Unretained(this)));
      picture_clearing_count_++;
      pending_picture_ready_.pop();
    } else {
      // This picture is cleared. But some pictures are about to be cleared on
      // the child thread. To preserve the order, do not send this until those
      // pictures are cleared.
      break;
    }
  }
}

void V4L2VideoDecodeAccelerator::PictureCleared() {
  DVLOGF(4) << "clearing count=" << picture_clearing_count_;
  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
  DCHECK_GT(picture_clearing_count_, 0);
  picture_clearing_count_--;
  SendPictureReady();
}

void V4L2VideoDecodeAccelerator::FrameProcessed(int32_t bitstream_buffer_id,
                                                int output_buffer_index) {
  DVLOGF(4) << "output_buffer_index=" << output_buffer_index
            << ", bitstream_buffer_id=" << bitstream_buffer_id;
  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
  DCHECK(!image_processor_bitstream_buffer_ids_.empty());
  DCHECK(image_processor_bitstream_buffer_ids_.front() == bitstream_buffer_id);
  DCHECK_GE(output_buffer_index, 0);
  DCHECK_LT(output_buffer_index, static_cast<int>(output_buffer_map_.size()));

  OutputRecord& output_record = output_buffer_map_[output_buffer_index];
  DVLOGF(4) << "picture_id=" << output_record.picture_id;
  DCHECK_EQ(output_record.state, kAtProcessor);
  DCHECK_NE(output_record.picture_id, -1);

  // Send the processed frame to render.
  output_record.state = kAtClient;
  decoder_frames_at_client_++;
  image_processor_bitstream_buffer_ids_.pop();
  // TODO(hubbe): Insert correct color space. http://crbug.com/647725
  const Picture picture(output_record.picture_id, bitstream_buffer_id,
                        gfx::Rect(visible_size_), gfx::ColorSpace(), false);
  pending_picture_ready_.push(PictureRecord(output_record.cleared, picture));
  SendPictureReady();
  output_record.cleared = true;

  // A flush or resolution change may be waiting for the image processor to
  // finish.
  if (image_processor_bitstream_buffer_ids_.empty()) {
    NotifyFlushDoneIfNeeded();
    if (decoder_state_ == kChangingResolution)
      StartResolutionChange();
  }
}

void V4L2VideoDecodeAccelerator::ImageProcessorError() {
  VLOGF(1) << "Image processor error";
  NOTIFY_ERROR(PLATFORM_FAILURE);
}

}  // namespace media
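// CreateInputBuffers() in the file above drives the standard V4L2 MMAP setup
// sequence (VIDIOC_REQBUFS, then VIDIOC_QUERYBUF + mmap() per buffer) through
// the V4L2Device wrapper. A standalone sketch of the same sequence against
// the raw kernel API follows; the device path, buffer count, and error
// handling are illustrative assumptions, not values from the file above.
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>
#include <linux/videodev2.h>

int main() {
  int fd = open("/dev/video0", O_RDWR);  // hypothetical decoder node
  if (fd < 0) { perror("open"); return 1; }

  // Ask the driver for a set of MMAP buffers on the (encoded) OUTPUT queue.
  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = 4;  // assumed count; the decoder above uses kInputBufferCount
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  if (ioctl(fd, VIDIOC_REQBUFS, &reqbufs) != 0) { perror("REQBUFS"); return 1; }

  for (unsigned i = 0; i < reqbufs.count; ++i) {
    struct v4l2_plane planes[1];
    struct v4l2_buffer buffer;
    memset(&buffer, 0, sizeof(buffer));
    memset(planes, 0, sizeof(planes));
    buffer.index = i;
    buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    buffer.memory = V4L2_MEMORY_MMAP;
    buffer.m.planes = planes;
    buffer.length = 1;  // one plane, as in the decoder's input format
    if (ioctl(fd, VIDIOC_QUERYBUF, &buffer) != 0) { perror("QUERYBUF"); return 1; }

    // Map the plane so the process can fill it with bitstream data.
    void* address = mmap(NULL, buffer.m.planes[0].length,
                         PROT_READ | PROT_WRITE, MAP_SHARED, fd,
                         buffer.m.planes[0].m.mem_offset);
    if (address == MAP_FAILED) { perror("mmap"); return 1; }
    printf("buffer %u mapped, %u bytes\n", i, buffer.m.planes[0].length);
    munmap(address, buffer.m.planes[0].length);  // this sketch unmaps at once
  }
  close(fd);
  return 0;
}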
null
null
null
null
48,808
25,591
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
25,591
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "extensions/browser/extension_pref_value_map.h" #include <stdint.h> #include <memory> #include "base/macros.h" #include "base/memory/ref_counted.h" #include "base/values.h" #include "components/prefs/pref_store_observer_mock.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" namespace { const char kExt1[] = "ext1"; const char kExt2[] = "ext2"; const char kExt3[] = "ext3"; const char kPref1[] = "path1.subpath"; const char kPref2[] = "path2"; const char kPref3[] = "path3"; const char kPref4[] = "path4"; } // namespace static base::Value* CreateVal(const char* str) { return new base::Value(str); } static base::Time CreateTime(int64_t t) { return base::Time::FromInternalValue(t); } template <typename BASECLASS> class ExtensionPrefValueMapTestBase : public BASECLASS { public: static const extensions::ExtensionPrefsScope kRegular = extensions::kExtensionPrefsScopeRegular; static const extensions::ExtensionPrefsScope kRegularOnly = extensions::kExtensionPrefsScopeRegularOnly; static const extensions::ExtensionPrefsScope kIncognitoPersistent = extensions::kExtensionPrefsScopeIncognitoPersistent; static const extensions::ExtensionPrefsScope kIncognitoSessionOnly = extensions::kExtensionPrefsScopeIncognitoSessionOnly; // Returns an empty string if the key is not set. std::string GetValue(const char * key, bool incognito) const { const base::Value *value = epvm_.GetEffectivePrefValue(key, incognito, NULL); std::string string_value; if (value) value->GetAsString(&string_value); return string_value; } // Registers the extension as enabled but without incognito permission. void RegisterExtension(const std::string& ext_id, const base::Time& install_time) { epvm_.RegisterExtension( ext_id, install_time, true /*enabled*/, false /*incognito*/); } protected: ExtensionPrefValueMap epvm_; }; class ExtensionPrefValueMapTest : public ExtensionPrefValueMapTestBase<testing::Test> { }; // A gmock-ified implementation of PrefStore::Observer. class ExtensionPrefValueMapObserverMock : public ExtensionPrefValueMap::Observer { public: ExtensionPrefValueMapObserverMock() {} virtual ~ExtensionPrefValueMapObserverMock() {} MOCK_METHOD1(OnPrefValueChanged, void(const std::string&)); MOCK_METHOD0(OnInitializationCompleted, void()); MOCK_METHOD0(OnExtensionPrefValueMapDestruction, void()); private: DISALLOW_COPY_AND_ASSIGN(ExtensionPrefValueMapObserverMock); }; TEST_F(ExtensionPrefValueMapTest, SetAndGetPrefValue) { RegisterExtension(kExt1, CreateTime(10)); epvm_.SetExtensionPref(kExt1, kPref1, kRegular, CreateVal("val1")); EXPECT_EQ("val1", GetValue(kPref1, false)); }; TEST_F(ExtensionPrefValueMapTest, GetNotSetPrefValue) { RegisterExtension(kExt1, CreateTime(10)); EXPECT_EQ(std::string(), GetValue(kPref1, false)); }; // Make sure the last-installed extension wins for each preference. 
TEST_F(ExtensionPrefValueMapTest, Override) { RegisterExtension(kExt1, CreateTime(10)); RegisterExtension(kExt2, CreateTime(20)); RegisterExtension(kExt3, CreateTime(30)); epvm_.SetExtensionPref(kExt1, kPref1, kRegular, CreateVal("val1")); epvm_.SetExtensionPref(kExt2, kPref1, kRegular, CreateVal("val2")); epvm_.SetExtensionPref(kExt3, kPref1, kRegular, CreateVal("val3")); epvm_.SetExtensionPref(kExt1, kPref2, kRegular, CreateVal("val4")); epvm_.SetExtensionPref(kExt2, kPref2, kRegular, CreateVal("val5")); epvm_.SetExtensionPref(kExt1, kPref1, kRegular, CreateVal("val6")); epvm_.SetExtensionPref(kExt1, kPref2, kRegular, CreateVal("val7")); epvm_.SetExtensionPref(kExt1, kPref3, kRegular, CreateVal("val8")); EXPECT_EQ("val3", GetValue(kPref1, false)); EXPECT_EQ("val5", GetValue(kPref2, false)); EXPECT_EQ("val8", GetValue(kPref3, false)); } TEST_F(ExtensionPrefValueMapTest, OverrideChecks) { RegisterExtension(kExt1, CreateTime(10)); RegisterExtension(kExt2, CreateTime(20)); RegisterExtension(kExt3, CreateTime(30)); EXPECT_FALSE(epvm_.DoesExtensionControlPref(kExt1, kPref1, NULL)); EXPECT_FALSE(epvm_.DoesExtensionControlPref(kExt2, kPref1, NULL)); EXPECT_FALSE(epvm_.DoesExtensionControlPref(kExt3, kPref1, NULL)); EXPECT_TRUE(epvm_.CanExtensionControlPref(kExt1, kPref1, false)); EXPECT_TRUE(epvm_.CanExtensionControlPref(kExt2, kPref1, false)); EXPECT_TRUE(epvm_.CanExtensionControlPref(kExt3, kPref1, false)); epvm_.SetExtensionPref(kExt2, kPref1, kRegular, CreateVal("val1")); EXPECT_FALSE(epvm_.DoesExtensionControlPref(kExt1, kPref1, NULL)); EXPECT_TRUE(epvm_.DoesExtensionControlPref(kExt2, kPref1, NULL)); EXPECT_FALSE(epvm_.DoesExtensionControlPref(kExt3, kPref1, NULL)); EXPECT_FALSE(epvm_.CanExtensionControlPref(kExt1, kPref1, false)); EXPECT_TRUE(epvm_.CanExtensionControlPref(kExt2, kPref1, false)); EXPECT_TRUE(epvm_.CanExtensionControlPref(kExt3, kPref1, false)); } TEST_F(ExtensionPrefValueMapTest, SetAndGetPrefValueIncognito) { RegisterExtension(kExt1, CreateTime(10)); epvm_.SetExtensionPref(kExt1, kPref1, kRegular, CreateVal("val1")); // Check that the value is not propagated until the extension gets incognito // permission. EXPECT_EQ(std::string(), GetValue(kPref1, true)); epvm_.SetExtensionIncognitoState(kExt1, true); EXPECT_EQ("val1", GetValue(kPref1, true)); epvm_.SetExtensionIncognitoState(kExt1, false); EXPECT_EQ(std::string(), GetValue(kPref1, true)); } TEST_F(ExtensionPrefValueMapTest, UninstallOnlyExtension) { RegisterExtension(kExt1, CreateTime(10)); epvm_.SetExtensionPref(kExt1, kPref1, kRegular, CreateVal("val1")); epvm_.UnregisterExtension(kExt1); EXPECT_EQ(std::string(), GetValue(kPref1, false)); } // Tests uninstalling an extension that wasn't winning for any preferences. TEST_F(ExtensionPrefValueMapTest, UninstallIrrelevantExtension) { RegisterExtension(kExt1, CreateTime(10)); RegisterExtension(kExt2, CreateTime(10)); epvm_.SetExtensionPref(kExt1, kPref1, kRegular, CreateVal("val1")); epvm_.SetExtensionPref(kExt2, kPref1, kRegular, CreateVal("val2")); epvm_.SetExtensionPref(kExt1, kPref2, kRegular, CreateVal("val3")); epvm_.SetExtensionPref(kExt2, kPref2, kRegular, CreateVal("val4")); epvm_.UnregisterExtension(kExt1); EXPECT_EQ("val2", GetValue(kPref1, false)); EXPECT_EQ("val4", GetValue(kPref2, false)); } // Tests uninstalling an extension that was winning for all preferences. 
TEST_F(ExtensionPrefValueMapTest, UninstallExtensionFromTop) { RegisterExtension(kExt1, CreateTime(10)); RegisterExtension(kExt2, CreateTime(20)); RegisterExtension(kExt3, CreateTime(30)); epvm_.SetExtensionPref(kExt1, kPref1, kRegular, CreateVal("val1")); epvm_.SetExtensionPref(kExt2, kPref1, kRegular, CreateVal("val2")); epvm_.SetExtensionPref(kExt3, kPref1, kRegular, CreateVal("val3")); epvm_.SetExtensionPref(kExt1, kPref2, kRegular, CreateVal("val4")); epvm_.SetExtensionPref(kExt3, kPref2, kRegular, CreateVal("val5")); epvm_.UnregisterExtension(kExt3); EXPECT_EQ("val2", GetValue(kPref1, false)); EXPECT_EQ("val4", GetValue(kPref2, false)); } // Tests uninstalling an extension that was winning for only some preferences. TEST_F(ExtensionPrefValueMapTest, UninstallExtensionFromMiddle) { RegisterExtension(kExt1, CreateTime(10)); RegisterExtension(kExt2, CreateTime(20)); RegisterExtension(kExt3, CreateTime(30)); epvm_.SetExtensionPref(kExt1, kPref1, kRegular, CreateVal("val1")); epvm_.SetExtensionPref(kExt2, kPref1, kRegular, CreateVal("val2")); epvm_.SetExtensionPref(kExt3, kPref1, kRegular, CreateVal("val3")); epvm_.SetExtensionPref(kExt1, kPref2, kRegular, CreateVal("val4")); epvm_.SetExtensionPref(kExt2, kPref2, kRegular, CreateVal("val5")); epvm_.SetExtensionPref(kExt1, kPref3, kRegular, CreateVal("val6")); epvm_.SetExtensionPref(kExt2, kPref4, kRegular, CreateVal("val7")); epvm_.UnregisterExtension(kExt2); EXPECT_EQ("val3", GetValue(kPref1, false)); EXPECT_EQ("val4", GetValue(kPref2, false)); EXPECT_EQ("val6", GetValue(kPref3, false)); EXPECT_EQ(std::string(), GetValue(kPref4, false)); } // Tests triggering of notifications to registered observers. TEST_F(ExtensionPrefValueMapTest, NotifyWhenNeeded) { using testing::Mock; using testing::StrEq; RegisterExtension(kExt1, CreateTime(10)); ExtensionPrefValueMapObserverMock observer; epvm_.AddObserver(&observer); EXPECT_CALL(observer, OnPrefValueChanged(std::string(kPref1))); epvm_.SetExtensionPref(kExt1, kPref1, kRegular, CreateVal("val1")); Mock::VerifyAndClearExpectations(&observer); // Write the same value again. EXPECT_CALL(observer, OnPrefValueChanged(std::string(kPref1))).Times(0); epvm_.SetExtensionPref(kExt1, kPref1, kRegular, CreateVal("val1")); Mock::VerifyAndClearExpectations(&observer); // Override incognito value. EXPECT_CALL(observer, OnPrefValueChanged(std::string(kPref1))); epvm_.SetExtensionPref(kExt1, kPref1, kRegular, CreateVal("val2")); Mock::VerifyAndClearExpectations(&observer); // Override non-incognito value. EXPECT_CALL(observer, OnPrefValueChanged(std::string(kPref1))); epvm_.SetExtensionPref(kExt1, kPref1, kRegular, CreateVal("val3")); Mock::VerifyAndClearExpectations(&observer); // Disable. EXPECT_CALL(observer, OnPrefValueChanged(std::string(kPref1))); epvm_.SetExtensionState(kExt1, false); Mock::VerifyAndClearExpectations(&observer); // Enable. EXPECT_CALL(observer, OnPrefValueChanged(std::string(kPref1))); epvm_.SetExtensionState(kExt1, true); Mock::VerifyAndClearExpectations(&observer); // Uninstall EXPECT_CALL(observer, OnPrefValueChanged(std::string(kPref1))); epvm_.UnregisterExtension(kExt1); Mock::VerifyAndClearExpectations(&observer); epvm_.RemoveObserver(&observer); // Write new value --> no notification after removing observer. EXPECT_CALL(observer, OnPrefValueChanged(std::string(kPref1))).Times(0); RegisterExtension(kExt1, CreateTime(10)); epvm_.SetExtensionPref(kExt1, kPref1, kRegular, CreateVal("val4")); Mock::VerifyAndClearExpectations(&observer); } // Tests disabling an extension. 
TEST_F(ExtensionPrefValueMapTest, DisableExt) { RegisterExtension(kExt1, CreateTime(10)); epvm_.SetExtensionPref(kExt1, kPref1, kRegular, CreateVal("val1")); epvm_.SetExtensionState(kExt1, false); EXPECT_EQ(std::string(), GetValue(kPref1, false)); } // Tests disabling and reenabling an extension. TEST_F(ExtensionPrefValueMapTest, ReenableExt) { RegisterExtension(kExt1, CreateTime(10)); epvm_.SetExtensionPref(kExt1, kPref1, kRegular, CreateVal("val1")); epvm_.SetExtensionState(kExt1, false); epvm_.SetExtensionState(kExt1, true); EXPECT_EQ("val1", GetValue(kPref1, false)); } struct OverrideIncognitoTestCase { OverrideIncognitoTestCase(bool enable_ext1_in_incognito, bool enable_ext2_in_incognito, int val_ext1_regular, int val_ext1_regular_only, int val_ext1_incognito_pers, int val_ext1_incognito_sess, int val_ext2_regular, int val_ext2_regular_only, int val_ext2_incognito_pers, int val_ext2_incognito_sess, int effective_value_regular, int effective_value_incognito) : enable_ext1_in_incognito_(enable_ext1_in_incognito), enable_ext2_in_incognito_(enable_ext2_in_incognito), val_ext1_regular_(val_ext1_regular), val_ext1_regular_only_(val_ext1_regular_only), val_ext1_incognito_pers_(val_ext1_incognito_pers), val_ext1_incognito_sess_(val_ext1_incognito_sess), val_ext2_regular_(val_ext2_regular), val_ext2_regular_only_(val_ext2_regular_only), val_ext2_incognito_pers_(val_ext2_incognito_pers), val_ext2_incognito_sess_(val_ext2_incognito_sess), effective_value_regular_(effective_value_regular), effective_value_incognito_(effective_value_incognito) {} bool enable_ext1_in_incognito_; bool enable_ext2_in_incognito_; // pers. = persistent // sess. = session only int val_ext1_regular_; // pref value of extension 1 int val_ext1_regular_only_; // pref value of extension 1 regular-only. int val_ext1_incognito_pers_; // pref value of extension 1 incognito pers. int val_ext1_incognito_sess_; // pref value of extension 1 incognito sess. int val_ext2_regular_; // pref value of extension 2 int val_ext2_regular_only_; // pref value of extension 2 regular-only. int val_ext2_incognito_pers_; // pref value of extension 2 incognito pers. int val_ext2_incognito_sess_; // pref value of extension 2 incognito sess. 
int effective_value_regular_; // desired winner regular int effective_value_incognito_; // desired winner incognito }; class ExtensionPrefValueMapTestIncognitoTests : public ExtensionPrefValueMapTestBase< testing::TestWithParam<OverrideIncognitoTestCase> > { }; TEST_P(ExtensionPrefValueMapTestIncognitoTests, OverrideIncognito) { OverrideIncognitoTestCase test = GetParam(); const char* strings[] = { "", // undefined "val1", "val2", "val3", "val4", "val5", "val6", "val7", "val8", }; const bool kEnabled = true; epvm_.RegisterExtension( kExt1, CreateTime(10), kEnabled, test.enable_ext1_in_incognito_); epvm_.RegisterExtension( kExt2, CreateTime(20), kEnabled, test.enable_ext2_in_incognito_); if (test.val_ext1_regular_) { epvm_.SetExtensionPref(kExt1, kPref1, kRegular, CreateVal(strings[test.val_ext1_regular_])); } if (test.val_ext1_regular_only_) { epvm_.SetExtensionPref(kExt1, kPref1, kRegularOnly, CreateVal(strings[test.val_ext1_regular_only_])); } if (test.val_ext1_incognito_pers_) { epvm_.SetExtensionPref(kExt1, kPref1, kIncognitoPersistent, CreateVal(strings[test.val_ext1_incognito_pers_])); } if (test.val_ext1_incognito_sess_) { epvm_.SetExtensionPref(kExt1, kPref1, kIncognitoSessionOnly, CreateVal(strings[test.val_ext1_incognito_sess_])); } if (test.val_ext2_regular_) { epvm_.SetExtensionPref(kExt2, kPref1, kRegular, CreateVal(strings[test.val_ext2_regular_])); } if (test.val_ext2_regular_only_) { epvm_.SetExtensionPref(kExt2, kPref1, kRegularOnly, CreateVal(strings[test.val_ext2_regular_only_])); } if (test.val_ext2_incognito_pers_) { epvm_.SetExtensionPref(kExt2, kPref1, kIncognitoPersistent, CreateVal(strings[test.val_ext2_incognito_pers_])); } if (test.val_ext2_incognito_sess_) { epvm_.SetExtensionPref(kExt2, kPref1, kIncognitoSessionOnly, CreateVal(strings[test.val_ext2_incognito_sess_])); } std::string actual; EXPECT_EQ(strings[test.effective_value_regular_], GetValue(kPref1, false)); EXPECT_EQ(strings[test.effective_value_incognito_], GetValue(kPref1, true)); epvm_.UnregisterExtension(kExt1); epvm_.UnregisterExtension(kExt2); } INSTANTIATE_TEST_CASE_P( ExtensionPrefValueMapTestIncognitoTestsInstance, ExtensionPrefValueMapTestIncognitoTests, testing::Values( // e.g. (true, 1, 0, 0, 0, 0, 0, 7, 0, 1, 7), means: // ext1 regular is set to "val1", ext2 incognito persistent is set to // "val7" // --> the winning regular value is "val1", the winning incognito // value is "val7". 
OverrideIncognitoTestCase(true, true, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1), OverrideIncognitoTestCase(true, true, 1, 2, 0, 0, 0, 0, 0, 0, 2, 1), OverrideIncognitoTestCase(true, true, 1, 0, 3, 0, 0, 0, 0, 0, 1, 3), OverrideIncognitoTestCase(true, true, 1, 0, 0, 4, 0, 0, 0, 0, 1, 4), OverrideIncognitoTestCase(true, true, 1, 0, 3, 4, 0, 0, 0, 0, 1, 4), OverrideIncognitoTestCase(true, true, 1, 2, 3, 0, 0, 0, 0, 0, 2, 3), OverrideIncognitoTestCase(true, true, 1, 0, 0, 0, 5, 0, 0, 0, 5, 5), OverrideIncognitoTestCase(true, true, 1, 2, 3, 0, 5, 0, 0, 0, 5, 5), OverrideIncognitoTestCase(true, true, 1, 0, 0, 0, 0, 6, 0, 0, 6, 1), OverrideIncognitoTestCase(true, true, 1, 0, 3, 0, 5, 6, 0, 0, 6, 5), OverrideIncognitoTestCase(true, true, 1, 0, 0, 4, 5, 6, 0, 0, 6, 5), OverrideIncognitoTestCase(true, true, 1, 0, 0, 0, 0, 0, 7, 0, 1, 7), OverrideIncognitoTestCase(true, true, 1, 2, 0, 0, 5, 0, 7, 0, 5, 7), OverrideIncognitoTestCase(true, true, 1, 2, 0, 0, 5, 0, 0, 8, 5, 8), OverrideIncognitoTestCase(true, true, 1, 2, 0, 0, 5, 0, 7, 8, 5, 8), OverrideIncognitoTestCase(true, true, 1, 2, 3, 0, 0, 6, 7, 0, 6, 7), // Same tests as above but w/o incognito permission. OverrideIncognitoTestCase(false, false, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0), OverrideIncognitoTestCase(false, false, 1, 2, 0, 0, 0, 0, 0, 0, 2, 0), OverrideIncognitoTestCase(false, false, 1, 0, 3, 0, 0, 0, 0, 0, 1, 0), OverrideIncognitoTestCase(false, false, 1, 0, 0, 4, 0, 0, 0, 0, 1, 0), OverrideIncognitoTestCase(false, false, 1, 0, 3, 4, 0, 0, 0, 0, 1, 0), OverrideIncognitoTestCase(false, false, 1, 2, 3, 0, 0, 0, 0, 0, 2, 0), OverrideIncognitoTestCase(false, false, 1, 0, 0, 0, 5, 0, 0, 0, 5, 0), OverrideIncognitoTestCase(false, false, 1, 2, 3, 0, 5, 0, 0, 0, 5, 0), OverrideIncognitoTestCase(false, false, 1, 0, 0, 0, 0, 6, 0, 0, 6, 0), OverrideIncognitoTestCase(false, false, 1, 0, 3, 0, 5, 6, 0, 0, 6, 0), OverrideIncognitoTestCase(false, false, 1, 0, 0, 4, 5, 6, 0, 0, 6, 0), OverrideIncognitoTestCase(false, false, 1, 0, 0, 0, 0, 0, 7, 0, 1, 0), OverrideIncognitoTestCase(false, false, 1, 2, 0, 0, 5, 0, 7, 0, 5, 0), OverrideIncognitoTestCase(false, false, 1, 2, 0, 0, 5, 0, 0, 8, 5, 0), OverrideIncognitoTestCase(false, false, 1, 2, 0, 0, 5, 0, 7, 8, 5, 0), OverrideIncognitoTestCase(false, false, 1, 2, 3, 0, 0, 6, 7, 0, 6, 0) ));
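// The Override test above encodes ExtensionPrefValueMap's precedence rule:
// the extension with the latest install time controls a preference. A
// minimal standalone sketch of just that rule; TinyPrefMap and its methods
// are invented for illustration and are not the real class:
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

class TinyPrefMap {
 public:
  void RegisterExtension(const std::string& ext, int64_t install_time) {
    install_time_[ext] = install_time;
  }
  void SetPref(const std::string& ext, const std::string& key,
               const std::string& value) {
    values_[key][ext] = value;
  }
  // Returns the value set by the most recently installed extension, or "".
  // Assumes every extension that sets a pref was registered first.
  std::string GetEffective(const std::string& key) const {
    std::string winner;
    int64_t best = -1;
    auto it = values_.find(key);
    if (it == values_.end()) return winner;
    for (const auto& [ext, value] : it->second) {
      int64_t t = install_time_.at(ext);
      if (t > best) { best = t; winner = value; }
    }
    return winner;
  }

 private:
  std::map<std::string, int64_t> install_time_;
  std::map<std::string, std::map<std::string, std::string>> values_;
};

int main() {
  // Mirrors the Override test's expectation: ext3 installed last, so it wins.
  TinyPrefMap map;
  map.RegisterExtension("ext1", 10);
  map.RegisterExtension("ext3", 30);
  map.SetPref("ext1", "path1.subpath", "val1");
  map.SetPref("ext3", "path1.subpath", "val3");
  printf("effective: %s\n", map.GetEffective("path1.subpath").c_str());  // val3
  return 0;
}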
null
null
null
null
22,454
43,121
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
208,116
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef _UAPI_LINUX_TTY_H #define _UAPI_LINUX_TTY_H /* * 'tty.h' defines some structures used by tty_io.c and some defines. */ #define NR_LDISCS 30 /* line disciplines */ #define N_TTY 0 #define N_SLIP 1 #define N_MOUSE 2 #define N_PPP 3 #define N_STRIP 4 #define N_AX25 5 #define N_X25 6 /* X.25 async */ #define N_6PACK 7 #define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */ #define N_R3964 9 /* Reserved for Simatic R3964 module */ #define N_PROFIBUS_FDL 10 /* Reserved for Profibus */ #define N_IRDA 11 /* Linux IrDa - http://irda.sourceforge.net/ */ #define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data */ /* cards about SMS messages */ #define N_HDLC 13 /* synchronous HDLC */ #define N_SYNC_PPP 14 /* synchronous PPP */ #define N_HCI 15 /* Bluetooth HCI UART */ #define N_GIGASET_M101 16 /* Siemens Gigaset M101 serial DECT adapter */ #define N_SLCAN 17 /* Serial / USB serial CAN Adaptors */ #define N_PPS 18 /* Pulse per Second */ #define N_V253 19 /* Codec control over voice modem */ #define N_CAIF 20 /* CAIF protocol for talking to modems */ #define N_GSM0710 21 /* GSM 0710 Mux */ #define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */ #define N_TRACESINK 23 /* Trace data routing for MIPI P1149.7 */ #define N_TRACEROUTER 24 /* Trace data routing for MIPI P1149.7 */ #define N_NCI 25 /* NFC NCI UART */ #endif /* _UAPI_LINUX_TTY_H */
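// The N_* line-discipline numbers in the header above are consumed through
// the tty TIOCGETD/TIOCSETD ioctls. A small sketch of how a program would
// read and switch the discipline; the device path is an assumption, and
// switching typically needs root plus a kernel module that provides the
// target ldisc, so the TIOCSETD call is expected to fail on most machines:
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <cstdio>

int main() {
  int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
  if (fd < 0) { perror("open"); return 1; }

  int ldisc = 0;
  if (ioctl(fd, TIOCGETD, &ldisc) == 0)
    printf("current line discipline: %d\n", ldisc);  // 0 == N_TTY

  ldisc = 3;  // N_PPP from the header above
  if (ioctl(fd, TIOCSETD, &ldisc) != 0)
    perror("TIOCSETD");  // fails without the PPP line discipline available

  close(fd);
  return 0;
}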
null
null
null
null
116,463
16,960
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
16,960
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <stddef.h> #include "components/viz/common/quads/compositor_frame.h" #include "components/viz/common/surfaces/parent_local_surface_id_allocator.h" #include "components/viz/service/frame_sinks/compositor_frame_sink_support.h" #include "components/viz/service/frame_sinks/frame_sink_manager_impl.h" #include "components/viz/service/surfaces/surface.h" #include "components/viz/service/surfaces/surface_hittest.h" #include "components/viz/service/surfaces/surface_manager.h" #include "components/viz/test/compositor_frame_helpers.h" #include "components/viz/test/fake_compositor_frame_sink_client.h" #include "components/viz/test/surface_hittest_test_helpers.h" #include "testing/gtest/include/gtest/gtest.h" #include "third_party/skia/include/core/SkColor.h" #include "ui/gfx/geometry/size.h" namespace viz { namespace { constexpr bool kIsRoot = true; constexpr bool kIsChildRoot = false; constexpr bool kNeedsSyncPoints = true; constexpr FrameSinkId kRootFrameSink(2, 0); constexpr FrameSinkId kChildFrameSink(65563, 0); constexpr FrameSinkId kArbitraryFrameSink(1337, 7331); struct TestCase { SurfaceId input_surface_id; gfx::Point input_point; SurfaceId expected_layer_tree_frame_sink_id; gfx::Point expected_output_point; bool query_renderer; }; void RunTests(SurfaceHittestDelegate* delegate, SurfaceManager* manager, TestCase* tests, size_t test_count) { SurfaceHittest hittest(delegate, manager); bool query_renderer; for (size_t i = 0; i < test_count; ++i) { const TestCase& test = tests[i]; gfx::Point point(test.input_point); gfx::Transform transform; EXPECT_EQ(test.expected_layer_tree_frame_sink_id, hittest.GetTargetSurfaceAtPoint(test.input_surface_id, point, &transform, &query_renderer)); transform.TransformPoint(&point); EXPECT_EQ(test.expected_output_point, point); EXPECT_EQ(test.query_renderer, query_renderer); // Verify that GetTransformToTargetSurface returns true and returns the same // transform as returned by GetTargetSurfaceAtPoint. gfx::Transform target_transform; EXPECT_TRUE(hittest.GetTransformToTargetSurface( test.input_surface_id, test.expected_layer_tree_frame_sink_id, &target_transform)); EXPECT_EQ(transform, target_transform); } } } // namespace using namespace test; class SurfaceHittestTest : public testing::Test { public: SurfaceHittestTest() = default; ~SurfaceHittestTest() override = default; CompositorFrameSinkSupport& root_support() { return *supports_[0]; } CompositorFrameSinkSupport& child_support() { return *supports_[1]; } SurfaceManager* surface_manager() { return frame_sink_manager_.surface_manager(); } // testing::Test: void SetUp() override { testing::Test::SetUp(); supports_.push_back(std::make_unique<CompositorFrameSinkSupport>( &client_, &frame_sink_manager_, kRootFrameSink, kIsRoot, kNeedsSyncPoints)); supports_.push_back(std::make_unique<CompositorFrameSinkSupport>( &client_, &frame_sink_manager_, kChildFrameSink, kIsChildRoot, kNeedsSyncPoints)); } void TearDown() override { supports_.clear(); } private: FrameSinkManagerImpl frame_sink_manager_; std::vector<std::unique_ptr<CompositorFrameSinkSupport>> supports_; FakeCompositorFrameSinkClient client_; DISALLOW_COPY_AND_ASSIGN(SurfaceHittestTest); }; // This test verifies that hit testing on a surface that does not exist does // not crash. TEST_F(SurfaceHittestTest, Hittest_BadCompositorFrameDoesNotCrash) { // Creates a root surface. 
gfx::Rect root_rect(300, 300);
  RenderPass* root_pass = nullptr;
  CompositorFrame root_frame = CreateCompositorFrame(root_rect, &root_pass);

  // Add a reference to a non-existent child surface on the root surface.
  SurfaceId child_surface_id(
      kArbitraryFrameSink,
      LocalSurfaceId(0xdeadbeef, 0xdeadbeef, base::UnguessableToken::Create()));
  gfx::Rect child_rect(200, 200);
  CreateSurfaceDrawQuad(root_pass, gfx::Transform(), root_rect, child_rect,
                        child_surface_id);

  // Submit the root frame.
  ParentLocalSurfaceIdAllocator root_allocator;
  LocalSurfaceId root_local_surface_id = root_allocator.GenerateId();
  SurfaceId root_surface_id(kRootFrameSink, root_local_surface_id);
  root_support().SubmitCompositorFrame(root_local_surface_id,
                                       std::move(root_frame));

  {
    SurfaceHittest hittest(nullptr, surface_manager());
    // It is expected this test will complete without crashes.
    gfx::Transform transform;
    bool query_renderer;
    EXPECT_EQ(root_surface_id,
              hittest.GetTargetSurfaceAtPoint(root_surface_id,
                                              gfx::Point(100, 100), &transform,
                                              &query_renderer));
  }
}

TEST_F(SurfaceHittestTest, Hittest_SingleSurface) {
  // Creates a root surface.
  gfx::Rect root_rect(300, 300);
  RenderPass* root_pass = nullptr;
  CompositorFrame root_frame = CreateCompositorFrame(root_rect, &root_pass);

  // Submit the root frame.
  ParentLocalSurfaceIdAllocator root_allocator;
  LocalSurfaceId root_local_surface_id = root_allocator.GenerateId();
  SurfaceId root_surface_id(kRootFrameSink, root_local_surface_id);
  root_support().SubmitCompositorFrame(root_local_surface_id,
                                       std::move(root_frame));
  TestCase tests[] = {
      {root_surface_id, gfx::Point(100, 100), root_surface_id,
       gfx::Point(100, 100), false},
  };

  RunTests(nullptr, surface_manager(), tests, arraysize(tests));
}

TEST_F(SurfaceHittestTest, Hittest_ChildSurface) {
  // Creates a root surface.
  gfx::Rect root_rect(300, 300);
  RenderPass* root_pass = nullptr;
  CompositorFrame root_frame = CreateCompositorFrame(root_rect, &root_pass);

  // Add a reference to the child surface on the root surface.
  ParentLocalSurfaceIdAllocator child_allocator;
  LocalSurfaceId child_local_surface_id = child_allocator.GenerateId();
  SurfaceId child_surface_id(kChildFrameSink, child_local_surface_id);
  gfx::Rect child_rect(200, 200);
  CreateSurfaceDrawQuad(
      root_pass,
      gfx::Transform(1.0f, 0.0f, 0.0f, 50.0f, 0.0f, 1.0f, 0.0f, 50.0f, 0.0f,
                     0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f),
      root_rect, child_rect, child_surface_id);

  // Submit the root frame.
  ParentLocalSurfaceIdAllocator root_allocator;
  LocalSurfaceId root_local_surface_id = root_allocator.GenerateId();
  SurfaceId root_surface_id(kRootFrameSink, root_local_surface_id);
  root_support().SubmitCompositorFrame(root_local_surface_id,
                                       std::move(root_frame));

  // Creates a child surface.
  RenderPass* child_pass = nullptr;
  CompositorFrame child_frame = CreateCompositorFrame(child_rect, &child_pass);

  // Add a solid quad in the child surface.
  gfx::Rect child_solid_quad_rect(100, 100);
  CreateSolidColorDrawQuad(
      child_pass,
      gfx::Transform(1.0f, 0.0f, 0.0f, 50.0f, 0.0f, 1.0f, 0.0f, 50.0f, 0.0f,
                     0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f),
      root_rect, child_solid_quad_rect);

  // Submit the frame.
child_support().SubmitCompositorFrame(child_local_surface_id, std::move(child_frame)); TestCase tests[] = {{root_surface_id, gfx::Point(10, 10), root_surface_id, gfx::Point(10, 10), false}, {root_surface_id, gfx::Point(99, 99), root_surface_id, gfx::Point(99, 99), false}, {root_surface_id, gfx::Point(100, 100), child_surface_id, gfx::Point(50, 50), true}, {root_surface_id, gfx::Point(199, 199), child_surface_id, gfx::Point(149, 149), true}, {root_surface_id, gfx::Point(200, 200), root_surface_id, gfx::Point(200, 200), false}, {root_surface_id, gfx::Point(290, 290), root_surface_id, gfx::Point(290, 290), false}}; RunTests(nullptr, surface_manager(), tests, arraysize(tests)); // Submit another root frame, with a slightly perturbed child Surface. root_frame = CreateCompositorFrame(root_rect, &root_pass); CreateSurfaceDrawQuad( root_pass, gfx::Transform(1.0f, 0.0f, 0.0f, 75.0f, 0.0f, 1.0f, 0.0f, 75.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f), root_rect, child_rect, child_surface_id); root_support().SubmitCompositorFrame(root_local_surface_id, std::move(root_frame)); // Verify that point (100, 100) no longer falls on the child surface. // Verify that the transform to the child surface's space has also shifted. { SurfaceHittest hittest(nullptr, surface_manager()); gfx::Point point(100, 100); gfx::Transform transform; bool query_renderer; EXPECT_EQ(root_surface_id, hittest.GetTargetSurfaceAtPoint(root_surface_id, point, &transform, &query_renderer)); transform.TransformPoint(&point); EXPECT_EQ(gfx::Point(100, 100), point); EXPECT_EQ(query_renderer, false); gfx::Point point_in_target_space(100, 100); gfx::Transform target_transform; EXPECT_TRUE(hittest.GetTransformToTargetSurface( root_surface_id, child_surface_id, &target_transform)); target_transform.TransformPoint(&point_in_target_space); EXPECT_NE(transform, target_transform); EXPECT_EQ(gfx::Point(25, 25), point_in_target_space); } } TEST_F(SurfaceHittestTest, Hittest_OccludedChildSurface) { // Creates a root surface. gfx::Rect root_rect(300, 300); RenderPass* root_pass = nullptr; CompositorFrame root_frame = CreateCompositorFrame(root_rect, &root_pass); // Add a solid quad to the root surface, occluding the child surface. gfx::Rect root_solid_quad_rect(100, 100); CreateSolidColorDrawQuad( root_pass, gfx::Transform(1.0f, 0.0f, 0.0f, 50.0f, 0.0f, 1.0f, 0.0f, 50.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f), root_rect, root_solid_quad_rect); // Add a reference to the child surface on the root surface. ParentLocalSurfaceIdAllocator child_allocator; LocalSurfaceId child_local_surface_id = child_allocator.GenerateId(); SurfaceId child_surface_id(kChildFrameSink, child_local_surface_id); gfx::Rect child_rect(200, 200); CreateSurfaceDrawQuad( root_pass, gfx::Transform(1.0f, 0.0f, 0.0f, 50.0f, 0.0f, 1.0f, 0.0f, 50.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f), root_rect, child_rect, child_surface_id); // Submit the root frame. ParentLocalSurfaceIdAllocator root_allocator; LocalSurfaceId root_local_surface_id = root_allocator.GenerateId(); SurfaceId root_surface_id(kRootFrameSink, root_local_surface_id); root_support().SubmitCompositorFrame(root_local_surface_id, std::move(root_frame)); // Creates a child surface. RenderPass* child_pass = nullptr; CompositorFrame child_frame = CreateCompositorFrame(child_rect, &child_pass); // Add a solid quad in the child surface. 
gfx::Rect child_solid_quad_rect(100, 100);
  CreateSolidColorDrawQuad(
      child_pass,
      gfx::Transform(1.0f, 0.0f, 0.0f, 50.0f, 0.0f, 1.0f, 0.0f, 50.0f, 0.0f,
                     0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f),
      root_rect, child_solid_quad_rect);

  // Submit the frame.
  child_support().SubmitCompositorFrame(child_local_surface_id,
                                        std::move(child_frame));

  TestCase tests[] = {{root_surface_id, gfx::Point(10, 10), root_surface_id,
                       gfx::Point(10, 10), false},
                      {root_surface_id, gfx::Point(99, 99), root_surface_id,
                       gfx::Point(99, 99), true},
                      {root_surface_id, gfx::Point(100, 100), root_surface_id,
                       gfx::Point(100, 100), true},
                      {root_surface_id, gfx::Point(199, 199), child_surface_id,
                       gfx::Point(149, 149), true},
                      {root_surface_id, gfx::Point(200, 200), root_surface_id,
                       gfx::Point(200, 200), false},
                      {root_surface_id, gfx::Point(290, 290), root_surface_id,
                       gfx::Point(290, 290), false}};

  RunTests(nullptr, surface_manager(), tests, arraysize(tests));
}

// This test verifies that hit testing will progress to the next quad if it
// encounters an invalid RenderPassDrawQuad for whatever reason.
TEST_F(SurfaceHittestTest, Hittest_InvalidRenderPassDrawQuad) {
  // Creates a root surface.
  gfx::Rect root_rect(300, 300);
  RenderPass* root_pass = nullptr;
  CompositorFrame root_frame = CreateCompositorFrame(root_rect, &root_pass);

  // Create a RenderPassDrawQuad to a non-existent RenderPass.
  int invalid_render_pass_id = 1337;
  CreateRenderPassDrawQuad(root_pass, gfx::Transform(), root_rect, root_rect,
                           invalid_render_pass_id);

  // Add a reference to the child surface on the root surface.
  ParentLocalSurfaceIdAllocator child_allocator;
  LocalSurfaceId child_local_surface_id = child_allocator.GenerateId();
  SurfaceId child_surface_id(kChildFrameSink, child_local_surface_id);
  gfx::Rect child_rect(200, 200);
  CreateSurfaceDrawQuad(
      root_pass,
      gfx::Transform(1.0f, 0.0f, 0.0f, 50.0f, 0.0f, 1.0f, 0.0f, 50.0f, 0.0f,
                     0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f),
      root_rect, child_rect, child_surface_id);

  // Submit the root frame.
  ParentLocalSurfaceIdAllocator root_allocator;
  LocalSurfaceId root_local_surface_id = root_allocator.GenerateId();
  SurfaceId root_surface_id(kRootFrameSink, root_local_surface_id);
  root_support().SubmitCompositorFrame(root_local_surface_id,
                                       std::move(root_frame));

  // Creates a child surface.
  RenderPass* child_pass = nullptr;
  CompositorFrame child_frame = CreateCompositorFrame(child_rect, &child_pass);

  // Add a solid quad in the child surface.
  gfx::Rect child_solid_quad_rect(100, 100);
  CreateSolidColorDrawQuad(
      child_pass,
      gfx::Transform(1.0f, 0.0f, 0.0f, 50.0f, 0.0f, 1.0f, 0.0f, 50.0f, 0.0f,
                     0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f),
      root_rect, child_solid_quad_rect);

  // Submit the frame.
  child_support().SubmitCompositorFrame(child_local_surface_id,
                                        std::move(child_frame));

  TestCase tests[] = {{root_surface_id, gfx::Point(10, 10), root_surface_id,
                       gfx::Point(10, 10), false},
                      {root_surface_id, gfx::Point(99, 99), root_surface_id,
                       gfx::Point(99, 99), false},
                      {root_surface_id, gfx::Point(100, 100), child_surface_id,
                       gfx::Point(50, 50), true},
                      {root_surface_id, gfx::Point(199, 199), child_surface_id,
                       gfx::Point(149, 149), true},
                      {root_surface_id, gfx::Point(200, 200), root_surface_id,
                       gfx::Point(200, 200), false},
                      {root_surface_id, gfx::Point(290, 290), root_surface_id,
                       gfx::Point(290, 290), false}};

  RunTests(nullptr, surface_manager(), tests, arraysize(tests));
}

TEST_F(SurfaceHittestTest, Hittest_RenderPassDrawQuad) {
  // Create a CompositorFrame with two RenderPasses.
gfx::Rect root_rect(300, 300); CompositorFrame root_frame = MakeDefaultCompositorFrame(); RenderPassList& render_pass_list = root_frame.render_pass_list; // Create a child RenderPass. int child_render_pass_id = 3; gfx::Transform transform_to_root_target(1.0f, 0.0f, 0.0f, 50.0f, 0.0f, 1.0f, 0.0f, 50.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f); CreateRenderPass(child_render_pass_id, gfx::Rect(100, 100), transform_to_root_target, &render_pass_list); // Create the root RenderPass. int root_render_pass_id = 2; CreateRenderPass(root_render_pass_id, root_rect, gfx::Transform(), &render_pass_list); RenderPass* root_pass = nullptr; root_pass = root_frame.render_pass_list.back().get(); // Create a RenderPassDrawQuad. gfx::Rect render_pass_quad_rect(100, 100); CreateRenderPassDrawQuad(root_pass, transform_to_root_target, root_rect, render_pass_quad_rect, child_render_pass_id); // Add a solid quad in the child render pass. RenderPass* child_render_pass = root_frame.render_pass_list.front().get(); gfx::Rect child_solid_quad_rect(100, 100); CreateSolidColorDrawQuad(child_render_pass, gfx::Transform(), gfx::Rect(100, 100), child_solid_quad_rect); // Submit the root frame. ParentLocalSurfaceIdAllocator root_allocator; LocalSurfaceId root_local_surface_id = root_allocator.GenerateId(); SurfaceId root_surface_id(kRootFrameSink, root_local_surface_id); root_support().SubmitCompositorFrame(root_local_surface_id, std::move(root_frame)); TestCase tests[] = {// These tests just miss the RenderPassDrawQuad. {root_surface_id, gfx::Point(49, 49), root_surface_id, gfx::Point(49, 49), false}, {root_surface_id, gfx::Point(150, 150), root_surface_id, gfx::Point(150, 150), false}, // These tests just hit the boundaries of the // RenderPassDrawQuad. {root_surface_id, gfx::Point(50, 50), root_surface_id, gfx::Point(50, 50), false}, {root_surface_id, gfx::Point(149, 149), root_surface_id, gfx::Point(149, 149), false}, // These tests fall somewhere in the center of the // RenderPassDrawQuad. {root_surface_id, gfx::Point(99, 99), root_surface_id, gfx::Point(99, 99), false}, {root_surface_id, gfx::Point(100, 100), root_surface_id, gfx::Point(100, 100), false}}; RunTests(nullptr, surface_manager(), tests, arraysize(tests)); } TEST_F(SurfaceHittestTest, Hittest_SingleSurface_WithInsetsDelegate) { // Creates a root surface. gfx::Rect root_rect(300, 300); RenderPass* root_pass = nullptr; CompositorFrame root_frame = CreateCompositorFrame(root_rect, &root_pass); // Add a reference to the child surface on the root surface. ParentLocalSurfaceIdAllocator child_allocator; LocalSurfaceId child_local_surface_id = child_allocator.GenerateId(); SurfaceId child_surface_id(kChildFrameSink, child_local_surface_id); gfx::Rect child_rect(200, 200); CreateSurfaceDrawQuad( root_pass, gfx::Transform(1.0f, 0.0f, 0.0f, 50.0f, 0.0f, 1.0f, 0.0f, 50.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f), root_rect, child_rect, child_surface_id); // Submit the root frame. ParentLocalSurfaceIdAllocator root_allocator; LocalSurfaceId root_local_surface_id = root_allocator.GenerateId(); SurfaceId root_surface_id(kRootFrameSink, root_local_surface_id); root_support().SubmitCompositorFrame(root_local_surface_id, std::move(root_frame)); // Creates a child surface. RenderPass* child_pass = nullptr; CompositorFrame child_frame = CreateCompositorFrame(child_rect, &child_pass); // Add a solid quad in the child surface. 
gfx::Rect child_solid_quad_rect(190, 190);
  CreateSolidColorDrawQuad(
      child_pass,
      gfx::Transform(1.0f, 0.0f, 0.0f, 5.0f, 0.0f, 1.0f, 0.0f, 5.0f, 0.0f,
                     0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f),
      root_rect, child_solid_quad_rect);

  // Submit the frame.
  child_support().SubmitCompositorFrame(child_local_surface_id,
                                        std::move(child_frame));

  TestCase test_expectations_without_insets[] = {
      {root_surface_id, gfx::Point(55, 55), child_surface_id, gfx::Point(5, 5),
       true},
      {root_surface_id, gfx::Point(60, 60), child_surface_id,
       gfx::Point(10, 10), true},
      {root_surface_id, gfx::Point(239, 239), child_surface_id,
       gfx::Point(189, 189), true},
      {root_surface_id, gfx::Point(244, 244), child_surface_id,
       gfx::Point(194, 194), true},
      {root_surface_id, gfx::Point(50, 50), root_surface_id,
       gfx::Point(50, 50), false},
      {root_surface_id, gfx::Point(249, 249), root_surface_id,
       gfx::Point(249, 249), false},
  };
  TestSurfaceHittestDelegate empty_delegate;
  RunTests(&empty_delegate, surface_manager(),
           test_expectations_without_insets,
           arraysize(test_expectations_without_insets));

  // Verify that insets have NOT affected hit targeting.
  EXPECT_EQ(0, empty_delegate.reject_target_overrides());
  EXPECT_EQ(0, empty_delegate.accept_target_overrides());

  TestCase test_expectations_with_reject_insets[] = {
      // Point (55, 55) falls outside the child surface due to the insets
      // introduced above.
      {root_surface_id, gfx::Point(55, 55), root_surface_id,
       gfx::Point(55, 55), false},
      // These two points still fall within the child surface.
      {root_surface_id, gfx::Point(60, 60), child_surface_id,
       gfx::Point(10, 10), true},
      {root_surface_id, gfx::Point(239, 239), child_surface_id,
       gfx::Point(189, 189), true},
      // Point (244, 244) falls outside the child surface due to the insets
      // introduced above.
      {root_surface_id, gfx::Point(244, 244), root_surface_id,
       gfx::Point(244, 244), false},
      // The next two points also fall within the insets introduced above.
      {root_surface_id, gfx::Point(50, 50), root_surface_id,
       gfx::Point(50, 50), false},
      {root_surface_id, gfx::Point(249, 249), root_surface_id,
       gfx::Point(249, 249), false},
  };
  TestSurfaceHittestDelegate reject_delegate;
  reject_delegate.AddInsetsForRejectSurface(child_surface_id,
                                            gfx::Insets(10, 10, 10, 10));
  RunTests(&reject_delegate, surface_manager(),
           test_expectations_with_reject_insets,
           arraysize(test_expectations_with_reject_insets));

  // Verify that insets have affected hit targeting.
  EXPECT_EQ(4, reject_delegate.reject_target_overrides());
  EXPECT_EQ(0, reject_delegate.accept_target_overrides());

  TestCase test_expectations_with_accept_insets[] = {
      {root_surface_id, gfx::Point(55, 55), child_surface_id, gfx::Point(5, 5),
       true},
      {root_surface_id, gfx::Point(60, 60), child_surface_id,
       gfx::Point(10, 10), true},
      {root_surface_id, gfx::Point(239, 239), child_surface_id,
       gfx::Point(189, 189), true},
      {root_surface_id, gfx::Point(244, 244), child_surface_id,
       gfx::Point(194, 194), true},
      // The next two points fall within the insets introduced above.
      {root_surface_id, gfx::Point(50, 50), child_surface_id, gfx::Point(0, 0),
       true},
      {root_surface_id, gfx::Point(249, 249), child_surface_id,
       gfx::Point(199, 199), true},
  };
  TestSurfaceHittestDelegate accept_delegate;
  accept_delegate.AddInsetsForAcceptSurface(child_surface_id,
                                            gfx::Insets(5, 5, 5, 5));
  RunTests(&accept_delegate, surface_manager(),
           test_expectations_with_accept_insets,
           arraysize(test_expectations_with_accept_insets));

  // Verify that insets have affected hit targeting.
EXPECT_EQ(0, accept_delegate.reject_target_overrides()); EXPECT_EQ(2, accept_delegate.accept_target_overrides()); } } // namespace viz
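// A worked example of the arithmetic the child-surface tests above rely on:
// the surface quad carries a transform-to-root translating by (50, 50), and
// hit testing maps a root-space point into child space with the inverse,
// which is why root (100, 100) is expected at child (50, 50). A minimal
// sketch for the pure-translation case (assumed: no scale or rotation):
#include <cstdio>

struct Point { int x, y; };

// Inverse of a pure translation (tx, ty): subtract the offset.
Point RootToChild(Point root_point, int tx, int ty) {
  return {root_point.x - tx, root_point.y - ty};
}

int main() {
  Point p = RootToChild({100, 100}, 50, 50);
  printf("child-space point: (%d, %d)\n", p.x, p.y);  // (50, 50)
  return 0;
}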
null
null
null
null
13,823
13,821
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
178,816
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* thread_info.h: h8300 low-level thread information * adapted from the i386 and PPC versions by Yoshinori Sato <ysato@users.sourceforge.jp> * * Copyright (C) 2002 David Howells (dhowells@redhat.com) * - Incorporating suggestions made by Linus Torvalds and Dave Miller */ #ifndef _ASM_THREAD_INFO_H #define _ASM_THREAD_INFO_H #include <asm/page.h> #include <asm/segment.h> #ifdef __KERNEL__ /* * Size of kernel stack for each process. This must be a power of 2... */ #define THREAD_SIZE_ORDER 1 #define THREAD_SIZE 8192 /* 2 pages */ #ifndef __ASSEMBLY__ /* * low level task data. * If you change this, change the TI_* offsets below to match. */ struct thread_info { struct task_struct *task; /* main task structure */ unsigned long flags; /* low level flags */ int cpu; /* cpu we're on */ int preempt_count; /* 0 => preemptable, <0 => BUG */ mm_segment_t addr_limit; }; /* * macros/functions for gaining access to the thread information structure */ #define INIT_THREAD_INFO(tsk) \ { \ .task = &tsk, \ .flags = 0, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ } #define init_thread_info (init_thread_union.thread_info) #define init_stack (init_thread_union.stack) /* how to get the thread information struct from C */ static inline struct thread_info *current_thread_info(void) { struct thread_info *ti; __asm__("mov.l sp, %0\n\t" "and.w %1, %T0" : "=&r"(ti) : "i" (~(THREAD_SIZE-1) & 0xffff)); return ti; } #endif /* __ASSEMBLY__ */ /* * thread information flag bit numbers */ #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ #define TIF_SINGLESTEP 3 /* singlestepping active */ #define TIF_MEMDIE 4 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ #define TIF_NOTIFY_RESUME 6 /* callback before returning to user */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */ #define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling TIF_NEED_RESCHED */ /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) /* work to do in syscall trace */ #define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT) /* work to do on any return to u-space */ #define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \ _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \ _TIF_SINGLESTEP | _TIF_NOTIFY_RESUME | \ _TIF_SYSCALL_TRACEPOINT) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)) #endif /* __KERNEL__ */ #endif /* _ASM_THREAD_INFO_H */
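// current_thread_info() in the header above recovers the thread_info pointer
// by masking the stack pointer with ~(THREAD_SIZE - 1): because the 8 KiB
// stack is power-of-two sized and aligned, clearing the low 13 bits of any
// in-stack address yields the stack base where thread_info lives. A
// user-space sketch of the same masking (the sample address is made up):
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kThreadSize = 8192;  // THREAD_SIZE: a power of two
  uintptr_t sp = 0x40a137f0;           // some address inside the stack
  uintptr_t base = sp & ~(kThreadSize - 1);  // clear the low 13 bits
  printf("stack base: 0x%lx\n", (unsigned long)base);  // 0x40a12000
  return 0;
}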
null
null
null
null
87,163
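The one non-obvious piece of the thread_info.h record above is current_thread_info(): because each kernel stack is a THREAD_SIZE-sized, THREAD_SIZE-aligned block with the thread_info structure at its base, masking the low bits off the stack pointer recovers the structure with no memory access. A standalone illustration of that mask trick follows, as a sketch only: the alignment value and struct layout are hypothetical, and support for an 8 KiB extended alignment via alignas is implementation-defined (mainstream compilers accept it).

#include <cstdint>
#include <cstdio>

constexpr std::uintptr_t kThreadSize = 8192;  // must be a power of two

// Hypothetical stand-in for the kernel's thread_info-at-base-of-stack layout.
struct alignas(kThreadSize) ThreadStack {
  int thread_info_placeholder;  // lives at the base of the aligned block
  unsigned char stack[kThreadSize - sizeof(int)];
};

int main() {
  static ThreadStack ts;
  // Take an address somewhere inside the block, as a stack pointer would be.
  std::uintptr_t sp = reinterpret_cast<std::uintptr_t>(&ts.stack[1000]);
  // Clearing the low bits lands back on the aligned base, which is exactly
  // what the "mov.l sp" + "and.w" pair in current_thread_info() computes.
  auto* base = reinterpret_cast<ThreadStack*>(sp & ~(kThreadSize - 1));
  std::printf("recovered base == &ts: %d\n", static_cast<int>(base == &ts));
}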
41,578
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
41,578
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef TOOLS_GN_SUBSTITUTION_PATTERN_H_ #define TOOLS_GN_SUBSTITUTION_PATTERN_H_ #include <string> #include <vector> #include "tools/gn/substitution_type.h" class BuildSettings; class Err; class ParseNode; class Value; // Represents a string with {{substitution_patterns}} in it. class SubstitutionPattern { public: struct Subrange { Subrange(); explicit Subrange(SubstitutionType t, const std::string& l = std::string()); ~Subrange(); inline bool operator==(const Subrange& other) const { return type == other.type && literal == other.literal; } SubstitutionType type; // When type_ == LITERAL, this specifies the literal. std::string literal; }; SubstitutionPattern(); SubstitutionPattern(const SubstitutionPattern& other); ~SubstitutionPattern(); // Parses the given string and fills in the pattern. The pattern must only // be initialized once. On failure, returns false and sets the error. bool Parse(const Value& value, Err* err); bool Parse(const std::string& str, const ParseNode* origin, Err* err); // Makes a pattern given a hardcoded string. Will assert if the string is // not a valid pattern. static SubstitutionPattern MakeForTest(const char* str); // Returns the pattern as a string with substitutions in it. std::string AsString() const; // Sets the bits in the given vector corresponding to the substitutions used // by this pattern. SUBSTITUTION_LITERAL is ignored. void FillRequiredTypes(SubstitutionBits* bits) const; // Checks whether this pattern resolves to something in the output directory // for the given build settings. If not, returns false and fills in the given // error. bool IsInOutputDir(const BuildSettings* build_settings, Err* err) const; // Returns a vector listing the substitutions used by this pattern, not // counting SUBSTITUTION_LITERAL. const std::vector<SubstitutionType>& required_types() const { return required_types_; } const std::vector<Subrange>& ranges() const { return ranges_; } bool empty() const { return ranges_.empty(); } private: std::vector<Subrange> ranges_; const ParseNode* origin_; std::vector<SubstitutionType> required_types_; }; #endif // TOOLS_GN_SUBSTITUTION_PATTERN_H_
null
null
null
null
38,441
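The SubstitutionPattern header above implies a two-phase design: Parse() splits the input once into alternating literal and {{tag}} subranges, and AsString()/FillRequiredTypes() just walk that vector. A rough sketch of the splitting step follows, with simplified stand-in types rather than GN's real ones; note the real Parse() also validates tag names against the known substitution types and reports an Err for an unterminated tag, where this sketch just keeps the remainder as a literal.

#include <iostream>
#include <string>
#include <vector>

struct Subrange {
  bool is_tag;       // true for a {{...}} placeholder
  std::string text;  // literal text, or the tag name without braces
};

std::vector<Subrange> ParsePattern(const std::string& s) {
  std::vector<Subrange> out;
  std::string::size_type pos = 0;
  while (pos < s.size()) {
    auto open = s.find("{{", pos);
    if (open == std::string::npos) {  // trailing literal, no more tags
      out.push_back({false, s.substr(pos)});
      break;
    }
    if (open > pos)  // literal chunk before the tag
      out.push_back({false, s.substr(pos, open - pos)});
    auto close = s.find("}}", open + 2);
    if (close == std::string::npos) {  // unterminated: keep as literal here
      out.push_back({false, s.substr(open)});
      break;
    }
    out.push_back({true, s.substr(open + 2, close - open - 2)});
    pos = close + 2;
  }
  return out;
}

int main() {
  for (const auto& r : ParsePattern("{{source_name_part}}.o"))
    std::cout << (r.is_tag ? "tag: " : "lit: ") << r.text << "\n";
}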
316
1,2,3,4,5,6
train_val
dc7b094a338c6c521f918f478e993f0f74bbea0d
316
Chrome
1
https://github.com/chromium/chromium
2011-06-15 06:45:53+00:00
InputMethodStatusConnection() : current_input_method_changed_(NULL), register_ime_properties_(NULL), update_ime_property_(NULL), connection_change_handler_(NULL), language_library_(NULL), ibus_(NULL), ibus_config_(NULL) { }
CVE-2011-2804
CWE-399
https://github.com/chromium/chromium/commit/dc7b094a338c6c521f918f478e993f0f74bbea0d
Low
316
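The record above is the only labeled-vulnerable sample in this stretch: lines 1-6 of a constructor that zero-initializes a set of raw handles, flagged as CWE-399 (resource management errors) under CVE-2011-2804. A common hardening pattern for that class of bug, shown purely as a generic sketch and not as the actual change in commit dc7b094a, is to hold such members in owning smart pointers so cleanup cannot be skipped on any path; the types below are invented placeholders that only echo the member names in the snippet.

#include <memory>

struct IBusBus {};     // placeholder types standing in for the real handles
struct IBusConfig {};

class InputMethodStatusConnection {
 public:
  InputMethodStatusConnection() = default;  // members default to null

 private:
  // Each resource's lifetime is tied to the owning object; the destructor
  // releases them automatically, removing the manual-teardown failure mode.
  std::unique_ptr<IBusBus> ibus_;
  std::unique_ptr<IBusConfig> ibus_config_;
};

int main() { InputMethodStatusConnection c; }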
38,176
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
203,171
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#define cpuhp_setup_state_nocalls(a, b, c, d) (0)
null
null
null
null
111,518
29,008
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
29,008
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2012 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Client for interacting with low-level protocol messages. #include "google/cacheinvalidation/impl/protocol-handler.h" #include <stddef.h> #include "google/cacheinvalidation/deps/string_util.h" #include "google/cacheinvalidation/impl/constants.h" #include "google/cacheinvalidation/impl/invalidation-client-core.h" #include "google/cacheinvalidation/impl/log-macro.h" #include "google/cacheinvalidation/impl/proto-helpers.h" #include "google/cacheinvalidation/impl/recurring-task.h" namespace invalidation { using ::ipc::invalidation::ConfigChangeMessage; using ::ipc::invalidation::InfoMessage; using ::ipc::invalidation::InitializeMessage; using ::ipc::invalidation::InitializeMessage_DigestSerializationType_BYTE_BASED; using ::ipc::invalidation::InvalidationMessage; using ::ipc::invalidation::PropertyRecord; using ::ipc::invalidation::RegistrationMessage; using ::ipc::invalidation::RegistrationSyncMessage; using ::ipc::invalidation::ServerHeader; using ::ipc::invalidation::ServerToClientMessage; using ::ipc::invalidation::TokenControlMessage; string ServerMessageHeader::ToString() const { return StringPrintf( "Token: %s, Summary: %s", ProtoHelpers::ToString(*token_).c_str(), ProtoHelpers::ToString(*registration_summary_).c_str()); } void ParsedMessage::InitFrom(const ServerToClientMessage& raw_message) { base_message = raw_message; // Does a deep copy. // For each field, assign it to the corresponding protobuf field if // present, else NULL. header.InitFrom(&base_message.header().client_token(), base_message.header().has_registration_summary() ? &base_message.header().registration_summary() : NULL); token_control_message = base_message.has_token_control_message() ? &base_message.token_control_message() : NULL; invalidation_message = base_message.has_invalidation_message() ? &base_message.invalidation_message() : NULL; registration_status_message = base_message.has_registration_status_message() ? &base_message.registration_status_message() : NULL; registration_sync_request_message = base_message.has_registration_sync_request_message() ? &base_message.registration_sync_request_message() : NULL; config_change_message = base_message.has_config_change_message() ? &base_message.config_change_message() : NULL; info_request_message = base_message.has_info_request_message() ? &base_message.info_request_message() : NULL; error_message = base_message.has_error_message() ? 
&base_message.error_message() : NULL; } ProtocolHandler::ProtocolHandler( const ProtocolHandlerConfigP& config, SystemResources* resources, Smearer* smearer, Statistics* statistics, int client_type, const string& application_name, ProtocolListener* listener, TiclMessageValidator* msg_validator) : logger_(resources->logger()), internal_scheduler_(resources->internal_scheduler()), network_(resources->network()), throttle_(config.rate_limit(), internal_scheduler_, NewPermanentCallback(this, &ProtocolHandler::SendMessageToServer)), listener_(listener), msg_validator_(msg_validator), message_id_(1), last_known_server_time_ms_(0), next_message_send_time_ms_(0), statistics_(statistics), batcher_(resources->logger(), statistics), client_type_(client_type) { // Initialize client version. ProtoHelpers::InitClientVersion(resources->platform(), application_name, &client_version_); } void ProtocolHandler::InitConfig(ProtocolHandlerConfigP* config) { // Add rate limits. // Allow at most 3 messages every 5 seconds. int window_ms = 5 * 1000; int num_messages_per_window = 3; ProtoHelpers::InitRateLimitP(window_ms, num_messages_per_window, config->add_rate_limit()); } void ProtocolHandler::InitConfigForTest(ProtocolHandlerConfigP* config) { // No rate limits. int small_batch_delay_for_test = 200; config->set_batching_delay_ms(small_batch_delay_for_test); // At most one message per second. ProtoHelpers::InitRateLimitP(1000, 1, config->add_rate_limit()); // At most six messages per minute. ProtoHelpers::InitRateLimitP(60 * 1000, 6, config->add_rate_limit()); } bool ProtocolHandler::HandleIncomingMessage(const string& incoming_message, ParsedMessage* parsed_message) { ServerToClientMessage message; message.ParseFromString(incoming_message); if (!message.IsInitialized()) { TLOG(logger_, WARNING, "Incoming message is unparseable: %s", ProtoHelpers::ToString(incoming_message).c_str()); return false; } // Validate the message. If this passes, we can blindly assume valid messages // from here on. TLOG(logger_, FINE, "Incoming message: %s", ProtoHelpers::ToString(message).c_str()); if (!msg_validator_->IsValid(message)) { statistics_->RecordError( Statistics::ClientErrorType_INCOMING_MESSAGE_FAILURE); TLOG(logger_, SEVERE, "Received invalid message: %s", ProtoHelpers::ToString(message).c_str()); return false; } // Check the version of the message. const ServerHeader& message_header = message.header(); if (message_header.protocol_version().version().major_version() != Constants::kProtocolMajorVersion) { statistics_->RecordError( Statistics::ClientErrorType_PROTOCOL_VERSION_FAILURE); TLOG(logger_, SEVERE, "Dropping message with incompatible version: %s", ProtoHelpers::ToString(message).c_str()); return false; } // Check if it is a ConfigChangeMessage which indicates that messages should // no longer be sent for a certain duration. Perform this check before the // token is even checked. if (message.has_config_change_message()) { const ConfigChangeMessage& config_change_msg = message.config_change_message(); statistics_->RecordReceivedMessage( Statistics::ReceivedMessageType_CONFIG_CHANGE); if (config_change_msg.has_next_message_delay_ms()) { // Validator has ensured that it is positive. next_message_send_time_ms_ = GetCurrentTimeMs() + config_change_msg.next_message_delay_ms(); } return false; // Ignore all other messages in the envelope. 
} if (message_header.server_time_ms() > last_known_server_time_ms_) { last_known_server_time_ms_ = message_header.server_time_ms(); } parsed_message->InitFrom(message); return true; } bool ProtocolHandler::CheckServerToken(const string& server_token) { CHECK(internal_scheduler_->IsRunningOnThread()) << "Not on internal thread"; const string& client_token = listener_->GetClientToken(); // If we do not have a client token yet, there is nothing to compare. The // message must have an initialize message and the upper layer will do the // appropriate checks. Hence, we return true if client_token is empty. if (client_token.empty()) { // No token. Return true so that we'll attempt to deliver a token control // message (if any) to the listener in handleIncomingMessage. return true; } if (client_token != server_token) { // Bad token - reject whole message. However, our channel can send us // messages intended for other clients belonging to the same user, so don't // log too loudly. TLOG(logger_, INFO, "Incoming message has bad token: %s, %s", ProtoHelpers::ToString(client_token).c_str(), ProtoHelpers::ToString(server_token).c_str()); statistics_->RecordError(Statistics::ClientErrorType_TOKEN_MISMATCH); return false; } return true; } void ProtocolHandler::SendInitializeMessage( const ApplicationClientIdP& application_client_id, const string& nonce, BatchingTask* batching_task, const string& debug_string) { CHECK(internal_scheduler_->IsRunningOnThread()) << "Not on internal thread"; if (application_client_id.client_type() != client_type_) { // This condition is not fatal, but it probably represents a bug somewhere // if it occurs. TLOG(logger_, WARNING, "Client type in application id does not match " "constructor-provided type: %s vs %s", ProtoHelpers::ToString(application_client_id).c_str(), client_type_); } // Simply store the message in pending_initialize_message_ and send it // when the batching task runs. InitializeMessage* message = new InitializeMessage(); ProtoHelpers::InitInitializeMessage(application_client_id, nonce, message); TLOG(logger_, INFO, "Batching initialize message for client: %s, %s", debug_string.c_str(), ProtoHelpers::ToString(*message).c_str()); batcher_.SetInitializeMessage(message); batching_task->EnsureScheduled(debug_string); } void ProtocolHandler::SendInfoMessage( const vector<pair<string, int> >& performance_counters, ClientConfigP* client_config, bool request_server_registration_summary, BatchingTask* batching_task) { CHECK(internal_scheduler_->IsRunningOnThread()) << "Not on internal thread"; // Simply store the message in pending_info_message_ and send it // when the batching task runs. InfoMessage* message = new InfoMessage(); message->mutable_client_version()->CopyFrom(client_version_); // Add configuration parameters. if (client_config != NULL) { message->mutable_client_config()->CopyFrom(*client_config); } // Add performance counters. for (size_t i = 0; i < performance_counters.size(); ++i) { PropertyRecord* counter = message->add_performance_counter(); counter->set_name(performance_counters[i].first); counter->set_value(performance_counters[i].second); } // Indicate whether we want the server's registration summary sent back. 
message->set_server_registration_summary_requested( request_server_registration_summary); TLOG(logger_, INFO, "Batching info message for client: %s", ProtoHelpers::ToString(*message).c_str()); batcher_.SetInfoMessage(message); batching_task->EnsureScheduled("Send-info"); } void ProtocolHandler::SendRegistrations( const vector<ObjectIdP>& object_ids, RegistrationP::OpType reg_op_type, BatchingTask* batching_task) { CHECK(internal_scheduler_->IsRunningOnThread()) << "Not on internal thread"; for (size_t i = 0; i < object_ids.size(); ++i) { batcher_.AddRegistration(object_ids[i], reg_op_type); } batching_task->EnsureScheduled("Send-registrations"); } void ProtocolHandler::SendInvalidationAck(const InvalidationP& invalidation, BatchingTask* batching_task) { CHECK(internal_scheduler_->IsRunningOnThread()) << "Not on internal thread"; // We could summarize acks if there are suppressing invalidations - we don't // since it is unlikely to be too beneficial here. batcher_.AddAck(invalidation); batching_task->EnsureScheduled("Send-ack"); } void ProtocolHandler::SendRegistrationSyncSubtree( const RegistrationSubtree& reg_subtree, BatchingTask* batching_task) { CHECK(internal_scheduler_->IsRunningOnThread()) << "Not on internal thread"; TLOG(logger_, INFO, "Adding subtree: %s", ProtoHelpers::ToString(reg_subtree).c_str()); batcher_.AddRegSubtree(reg_subtree); batching_task->EnsureScheduled("Send-reg-sync"); } void ProtocolHandler::SendMessageToServer() { CHECK(internal_scheduler_->IsRunningOnThread()) << "Not on internal thread"; if (next_message_send_time_ms_ > GetCurrentTimeMs()) { TLOG(logger_, WARNING, "In quiet period: not sending message to server: " "%s > %s", SimpleItoa(next_message_send_time_ms_).c_str(), SimpleItoa(GetCurrentTimeMs()).c_str()); return; } const bool has_client_token(!listener_->GetClientToken().empty()); ClientToServerMessage builder; if (!batcher_.ToBuilder(&builder, has_client_token)) { TLOG(logger_, WARNING, "Unable to build message"); return; } ClientHeader* outgoing_header = builder.mutable_header(); InitClientHeader(outgoing_header); // Validate the message and send it. ++message_id_; if (!msg_validator_->IsValid(builder)) { TLOG(logger_, SEVERE, "Tried to send invalid message: %s", ProtoHelpers::ToString(builder).c_str()); statistics_->RecordError( Statistics::ClientErrorType_OUTGOING_MESSAGE_FAILURE); return; } TLOG(logger_, FINE, "Sending message to server: %s", ProtoHelpers::ToString(builder).c_str()); statistics_->RecordSentMessage(Statistics::SentMessageType_TOTAL); string serialized; builder.SerializeToString(&serialized); network_->SendMessage(serialized); // Record that the message was sent. We do this inline to match what the // Java Ticl, which is constrained by Android requirements, does. 
listener_->HandleMessageSent(); } void ProtocolHandler::InitClientHeader(ClientHeader* builder) { CHECK(internal_scheduler_->IsRunningOnThread()) << "Not on internal thread"; ProtoHelpers::InitProtocolVersion(builder->mutable_protocol_version()); builder->set_client_time_ms(GetCurrentTimeMs()); builder->set_message_id(StringPrintf("%d", message_id_)); builder->set_max_known_server_time_ms(last_known_server_time_ms_); builder->set_client_type(client_type_); listener_->GetRegistrationSummary(builder->mutable_registration_summary()); const string& client_token = listener_->GetClientToken(); if (!client_token.empty()) { TLOG(logger_, FINE, "Sending token on client->server message: %s", ProtoHelpers::ToString(client_token).c_str()); builder->set_client_token(client_token); } } bool Batcher::ToBuilder(ClientToServerMessage* builder, bool has_client_token) { // Check if an initialize message needs to be sent. if (pending_initialize_message_.get() != NULL) { statistics_->RecordSentMessage(Statistics::SentMessageType_INITIALIZE); builder->mutable_initialize_message()->CopyFrom( *pending_initialize_message_); pending_initialize_message_.reset(); } // Note: Even if an initialize message is being sent, we can send additional // messages such as registration messages, etc. to the server. But if there is // no token and an initialize message is not being sent, we cannot send any // other message. if (!has_client_token && !builder->has_initialize_message()) { // Cannot send any message. TLOG(logger_, WARNING, "Cannot send message since no token and no initialize msg: %s", ProtoHelpers::ToString(*builder).c_str()); statistics_->RecordError(Statistics::ClientErrorType_TOKEN_MISSING_FAILURE); return false; } // Check for pending batched operations and add to message builder if needed. // Add reg, acks, reg subtrees - clear them after adding. if (!pending_acked_invalidations_.empty()) { InitAckMessage(builder->mutable_invalidation_ack_message()); statistics_->RecordSentMessage( Statistics::SentMessageType_INVALIDATION_ACK); } // Check regs. if (!pending_registrations_.empty()) { InitRegistrationMessage(builder->mutable_registration_message()); statistics_->RecordSentMessage(Statistics::SentMessageType_REGISTRATION); } // Check reg subtrees. if (!pending_reg_subtrees_.empty()) { RegistrationSyncMessage* sync_message = builder->mutable_registration_sync_message(); set<RegistrationSubtree, ProtoCompareLess>::const_iterator iter; for (iter = pending_reg_subtrees_.begin(); iter != pending_reg_subtrees_.end(); ++iter) { sync_message->add_subtree()->CopyFrom(*iter); } pending_reg_subtrees_.clear(); statistics_->RecordSentMessage( Statistics::SentMessageType_REGISTRATION_SYNC); } // Check info message. if (pending_info_message_.get() != NULL) { statistics_->RecordSentMessage(Statistics::SentMessageType_INFO); builder->mutable_info_message()->CopyFrom(*pending_info_message_); pending_info_message_.reset(); } return true; } void Batcher::InitRegistrationMessage( RegistrationMessage* reg_message) { CHECK(!pending_registrations_.empty()); // Run through the pending_registrations map.
map<ObjectIdP, RegistrationP::OpType, ProtoCompareLess>::iterator iter; for (iter = pending_registrations_.begin(); iter != pending_registrations_.end(); ++iter) { ProtoHelpers::InitRegistrationP(iter->first, iter->second, reg_message->add_registration()); } pending_registrations_.clear(); } void Batcher::InitAckMessage(InvalidationMessage* ack_message) { CHECK(!pending_acked_invalidations_.empty()); // Run through pending_acked_invalidations_ set. set<InvalidationP, ProtoCompareLess>::iterator iter; for (iter = pending_acked_invalidations_.begin(); iter != pending_acked_invalidations_.end(); iter++) { ack_message->add_invalidation()->CopyFrom(*iter); } pending_acked_invalidations_.clear(); } } // namespace invalidation
null
null
null
null
25,871
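Across HandleIncomingMessage, the Send* entry points, and Batcher::ToBuilder, the protocol-handler record above implements one pattern worth isolating: callers never send directly; they deposit work into pending collections and a batching task later drains everything into a single outgoing message. A stripped-down sketch of that accumulate-and-drain shape follows, with a plain struct standing in for the real ClientToServerMessage protobuf and strings standing in for registrations and acks.

#include <iostream>
#include <set>
#include <string>
#include <vector>

struct Message { std::vector<std::string> registrations, acks; };

class Batcher {
 public:
  void AddRegistration(std::string r) { pending_regs_.insert(std::move(r)); }
  void AddAck(std::string a) { pending_acks_.insert(std::move(a)); }

  // Drain everything pending into one message; returns false if nothing to send.
  bool ToBuilder(Message* out) {
    if (pending_regs_.empty() && pending_acks_.empty()) return false;
    out->registrations.assign(pending_regs_.begin(), pending_regs_.end());
    out->acks.assign(pending_acks_.begin(), pending_acks_.end());
    pending_regs_.clear();
    pending_acks_.clear();
    return true;
  }

 private:
  std::set<std::string> pending_regs_, pending_acks_;  // sets dedupe repeats
};

int main() {
  Batcher b;
  b.AddRegistration("obj1");
  b.AddRegistration("obj1");  // duplicate before the flush coalesces to one
  b.AddAck("inv42");
  Message m;
  if (b.ToBuilder(&m))
    std::cout << m.registrations.size() << " regs, " << m.acks.size() << " acks\n";
}

Using sets for the pending pools mirrors the real Batcher's ProtoCompareLess-ordered sets: re-adding the same registration or ack before a flush collapses to a single entry in the outgoing message.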