Unnamed: 0 (int64, 0-2.93k) | code (string, lengths 101-62.2k) | docs (string, lengths 51-10.7k) | doc_len (int64, 4-1.74k) | words (int64, 4-4.82k) | lang (stringclasses, 1 value) | prompt (string, lengths 320-71.2k) |
---|---|---|---|---|---|---|
1,900 | def _pi_coeff(arg, cycles=1):
r
arg = sympify(arg)
if arg is pi:
return S.One
elif not arg:
return S.Zero
elif arg.is_Mul:
cx = arg.coeff(pi)
if cx:
c, x = cx.as_coeff_Mul() # pi is not included as coeff
if c.is_Float:
# recast exact binary fractions to Rationals
f = abs(c) % 1
if f != 0:
p = -int(round(log(f, 2).evalf()))
m = 2**p
cm = c*m
i = int(cm)
if i == cm:
c = Rational(i, m)
cx = c*x
else:
c = Rational(int(c))
cx = c*x
if x.is_integer:
c2 = c % 2
if c2 == 1:
return x
elif not c2:
if x.is_even is not None: # known parity
return S.Zero
return Integer(2)
else:
return c2*x
return cx
elif arg.is_zero:
return S.Zero
|
When arg is a Number times $\pi$ (e.g. $3\pi/2$) then return the Number
normalized to be in the range $[0, 2]$, else `None`.
When an even multiple of $\pi$ is encountered, if it is multiplying
something with known parity then the multiple is returned as 0 otherwise
as 2.
Examples
========
>>> from sympy.functions.elementary.trigonometric import _pi_coeff
>>> from sympy import pi, Dummy
>>> from sympy.abc import x
>>> _pi_coeff(3*x*pi)
3*x
>>> _pi_coeff(11*pi/7)
11/7
>>> _pi_coeff(-11*pi/7)
3/7
>>> _pi_coeff(4*pi)
0
>>> _pi_coeff(5*pi)
1
>>> _pi_coeff(5.0*pi)
1
>>> _pi_coeff(5.5*pi)
3/2
>>> _pi_coeff(2 + pi)
>>> _pi_coeff(2*Dummy(integer=True)*pi)
2
>>> _pi_coeff(2*Dummy(even=True)*pi)
0
| 98 | 122 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _pi_coeff(arg, cycles=1):
r
arg = sympify(arg)
if arg is pi:
return S.One
elif not arg:
return S.Zero
elif arg.is_Mul:
cx = arg.coeff(pi)
if cx:
c, x = cx.as_coeff_Mul() # pi is not included as coeff
if c.is_Float:
# recast exact binary fractions to Rationals
f = abs(c) % 1
if f != 0:
p = -int(round(log(f, 2).evalf()))
m = 2**p
cm = c*m
i = int(cm)
if i == cm:
c = Rational(i, m)
cx = c*x
else:
c = Rational(int(c))
cx = c*x
if x.is_integer:
c2 = c % 2
if c2 == 1:
return x
elif not c2:
if x.is_even is not None: # known parity
return S.Zero
return Integer(2)
else:
return c2*x
return cx
elif arg.is_zero:
return S.Zero
```
###Assistant :
When arg is a Number times $\pi$ (e.g. $3\pi/2$) then return the Number
normalized to be in the range $[0, 2]$, else `None`.
When an even multiple of $\pi$ is encountered, if it is multiplying
something with known parity then the multiple is returned as 0 otherwise
as 2.
Examples
========
>>> from sympy.functions.elementary.trigonometric import _pi_coeff
>>> from sympy import pi, Dummy
>>> from sympy.abc import x
>>> _pi_coeff(3*x*pi)
3*x
>>> _pi_coeff(11*pi/7)
11/7
>>> _pi_coeff(-11*pi/7)
3/7
>>> _pi_coeff(4*pi)
0
>>> _pi_coeff(5*pi)
1
>>> _pi_coeff(5.0*pi)
1
>>> _pi_coeff(5.5*pi)
3/2
>>> _pi_coeff(2 + pi)
>>> _pi_coeff(2*Dummy(integer=True)*pi)
2
>>> _pi_coeff(2*Dummy(even=True)*pi)
0
|
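The row above documents SymPy's `_pi_coeff` helper; a minimal sketch of the mod-2 normalization it performs, written against plain `Rational` values rather than the private helper itself, is:

```Python
# Sketch of the "coefficient mod 2" reduction documented above. The pi
# coefficient is folded into [0, 2) because the trig functions have
# period 2*pi. These inputs mirror the doctest values.
from sympy import Rational

for c in (Rational(11, 7), Rational(-11, 7), Rational(11, 2), Rational(4), Rational(5)):
    print(c, "->", c % 2)
# 11/7 -> 11/7, -11/7 -> 3/7, 11/2 -> 3/2, 4 -> 0, 5 -> 1
```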
1,901 | async def async_test_still(hass, info) -> tuple[dict[str, str], str | None]:
fmt = None
if not (url := info.get(CONF_STILL_IMAGE_URL)):
return {}, None
if not isinstance(url, template_helper.Template) and url:
url = cv.template(url)
url.hass = hass
try:
url = url.async_render(parse_result=False)
except TemplateError as err:
_LOGGER.error("Error parsing template %s: %s", url, err)
return {CONF_STILL_IMAGE_URL: "template_error"}, None
verify_ssl = info.get(CONF_VERIFY_SSL)
auth = generate_auth(info)
try:
async_client = get_async_client(hass, verify_ssl=verify_ssl)
| Verify that the still image is valid before we create an entity. | 12 | 63 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
async def async_test_still(hass, info) -> tuple[dict[str, str], str | None]:
fmt = None
if not (url := info.get(CONF_STILL_IMAGE_URL)):
return {}, None
if not isinstance(url, template_helper.Template) and url:
url = cv.template(url)
url.hass = hass
try:
url = url.async_render(parse_result=False)
except TemplateError as err:
_LOGGER.error("Error parsing template %s: %s", url, err)
return {CONF_STILL_IMAGE_URL: "template_error"}, None
verify_ssl = info.get(CONF_VERIFY_SSL)
auth = generate_auth(info)
try:
async_client = get_async_client(hass, verify_ssl=verify_ssl)
```
###Assistant : Verify that the still image is valid before we create an entity.
|
1,902 | def normalize(self, a):
a = _convert_other(a, raiseit=True)
return a.normalize(context=self)
| normalize reduces an operand to its simplest form.
Essentially a plus operation with all trailing zeros removed from the
result.
>>> ExtendedContext.normalize(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.normalize(Decimal('-2.0'))
Decimal('-2')
>>> ExtendedContext.normalize(Decimal('1.200'))
Decimal('1.2')
>>> ExtendedContext.normalize(Decimal('-120'))
Decimal('-1.2E+2')
>>> ExtendedContext.normalize(Decimal('120.00'))
Decimal('1.2E+2')
>>> ExtendedContext.normalize(Decimal('0.00'))
Decimal('0')
>>> ExtendedContext.normalize(6)
Decimal('6')
| 41 | 9 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def normalize(self, a):
a = _convert_other(a, raiseit=True)
return a.normalize(context=self)
```
###Assistant : normalize reduces an operand to its simplest form.
Essentially a plus operation with all trailing zeros removed from the
result.
>>> ExtendedContext.normalize(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.normalize(Decimal('-2.0'))
Decimal('-2')
>>> ExtendedContext.normalize(Decimal('1.200'))
Decimal('1.2')
>>> ExtendedContext.normalize(Decimal('-120'))
Decimal('-1.2E+2')
>>> ExtendedContext.normalize(Decimal('120.00'))
Decimal('1.2E+2')
>>> ExtendedContext.normalize(Decimal('0.00'))
Decimal('0')
>>> ExtendedContext.normalize(6)
Decimal('6')
|
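The docstring in the row above matches the standard-library `decimal` context API, so the behaviour can be reproduced directly with `ExtendedContext`; a short usage sketch:

```Python
# Usage sketch of Context.normalize from the standard-library decimal
# module, reproducing the trailing-zero reduction shown in the doctests.
from decimal import Decimal, ExtendedContext

print(ExtendedContext.normalize(Decimal("1.200")))   # Decimal('1.2')
print(ExtendedContext.normalize(Decimal("120.00")))  # Decimal('1.2E+2')
print(ExtendedContext.normalize(Decimal("0.00")))    # Decimal('0')
print(ExtendedContext.normalize(6))                  # Decimal('6')
```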
1,903 | def get_tables(self) -> StatusResponse:
query =
result = self.native_query(query)
df = result.data_frame
df = df[['TABLE_NAME', 'TABLE_TYPE']]
result.data_frame = df.rename(columns={'TABLE_NAME': 'table_name', 'TABLE_TYPE': 'table_type'})
return result
|
Return list of entities that will be accessible as tables.
Returns:
HandlerResponse
SELECT *
FROM INFORMATION_SCHEMA.TABLES
| 16 | 24 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_tables(self) -> StatusResponse:
query =
result = self.native_query(query)
df = result.data_frame
df = df[['TABLE_NAME', 'TABLE_TYPE']]
result.data_frame = df.rename(columns={'TABLE_NAME': 'table_name', 'TABLE_TYPE': 'table_type'})
return result
```
###Assistant :
Return list of entities that will be accessible as tables.
Returns:
HandlerResponse
SELECT *
FROM INFORMATION_SCHEMA.TABLES
|
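The post-processing step in the row above is plain pandas; a self-contained sketch with a hypothetical `INFORMATION_SCHEMA.TABLES` result (the sample rows are assumptions for illustration) looks like this:

```Python
# Sketch of the column selection and rename done in get_tables, applied
# to a made-up INFORMATION_SCHEMA.TABLES result frame.
import pandas as pd

df = pd.DataFrame({
    "TABLE_NAME": ["customers", "orders"],
    "TABLE_TYPE": ["BASE TABLE", "VIEW"],
    "TABLE_SCHEMA": ["public", "public"],  # extra column, dropped below
})

df = df[["TABLE_NAME", "TABLE_TYPE"]]
df = df.rename(columns={"TABLE_NAME": "table_name", "TABLE_TYPE": "table_type"})
print(df)
#   table_name  table_type
# 0  customers  BASE TABLE
# 1     orders        VIEW
```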
1,904 | def get_expected_values(self, image_inputs, batched=False):
if not batched:
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
else:
h, w = image.shape[1], image.shape[2]
scale = self.size / min(w, h)
if h < w:
newh, neww = self.size, scale * w
else:
newh, neww = scale * h, self.size
max_size = int((1333 / 800) * self.size)
if max(newh, neww) > max_size:
scale = max_size / max(newh, neww)
newh = newh * scale
neww = neww * scale
newh, neww = int(newh + 0.5), int(neww + 0.5)
expected_height, expected_width = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
|
This function computes the expected height and width when providing images to ViltFeatureExtractor,
assuming do_resize is set to True with a scalar size and size_divisor.
| 25 | 131 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_expected_values(self, image_inputs, batched=False):
if not batched:
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
else:
h, w = image.shape[1], image.shape[2]
scale = self.size / min(w, h)
if h < w:
newh, neww = self.size, scale * w
else:
newh, neww = scale * h, self.size
max_size = int((1333 / 800) * self.size)
if max(newh, neww) > max_size:
scale = max_size / max(newh, neww)
newh = newh * scale
neww = neww * scale
newh, neww = int(newh + 0.5), int(neww + 0.5)
expected_height, expected_width = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
```
###Assistant :
This function computes the expected height and width when providing images to ViltFeatureExtractor,
assuming do_resize is set to True with a scalar size and size_divisor.
|
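A worked version of the resize arithmetic documented above helps make the rule concrete; the numbers (size=384, size_divisor=32, a 480x640 image) are illustrative assumptions:

```Python
# Worked sketch of the expected-size computation: scale the shorter side
# to `size`, cap the longer side at (1333/800)*size, round, then floor
# both sides to a multiple of `size_divisor`.
size, size_divisor = 384, 32
h, w = 480, 640

scale = size / min(w, h)                       # 384 / 480 = 0.8
newh, neww = (size, scale * w) if h < w else (scale * h, size)

max_size = int((1333 / 800) * size)            # 639
if max(newh, neww) > max_size:                 # not triggered for 480x640
    rescale = max_size / max(newh, neww)
    newh, neww = newh * rescale, neww * rescale

newh, neww = int(newh + 0.5), int(neww + 0.5)
print((newh // size_divisor * size_divisor,
       neww // size_divisor * size_divisor))   # (384, 512)
```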
1,905 | def preprocss_testing_data(self, data):
num_augs = len(data[0]['img'])
batch_size = len(data)
aug_batch_imgs = []
aug_batch_data_samples = []
# adjust `images` and `data_samples` to a list of list
# outer list is test-time augmentation and inner list
# is batch dimension
for aug_index in range(num_augs):
batch_imgs = []
batch_data_samples = []
for batch_index in range(batch_size):
single_img = data[batch_index]['img'][aug_index]
# to gpu and normalize
single_img = single_img.to(self.device)
if self.to_rgb and single_img[0].size(0) == 3:
single_img = single_img[[2, 1, 0], ...]
single_img = (single_img - self.pixel_mean) / self.pixel_std
batch_imgs.append(single_img)
batch_data_samples.append(
data[batch_index]['data_sample'][aug_index])
aug_batch_imgs.append(stack_batch(batch_imgs))
aug_batch_data_samples.append(batch_data_samples)
return aug_batch_imgs, aug_batch_data_samples
| Process input data during training and testing phases.
Args:
data (list[dict]): The data to be processed, which
comes from the dataloader. The list indicates the batch dimension.
Each dict contains these keys:
- `img` (list[Tensor]): Image tensor with different test-time
augmentation.
- `data_sample` (list[:obj:`GeneralData`]): Meta information
and annotations under different test-time augmentation.
Returns:
tuple: It should contain 2 items.
- aug_batch_imgs (list[Tensor]): List of batch image
tensors. The list indicates the test-time augmentations.
Note that the batch size is always 1
when running the augtest.
- aug_batch_data_samples
(list[list[:obj:`GeneralData`]], Optional):
The Data Samples. It usually includes information such as
`gt_instance`. Returns None if the input data does not
contain `data_sample`. The outer list indicates the
number of augmentations and the inner list indicates the
batch dimension.
| 123 | 90 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def preprocss_testing_data(self, data):
num_augs = len(data[0]['img'])
batch_size = len(data)
aug_batch_imgs = []
aug_batch_data_samples = []
# adjust `images` and `data_samples` to a list of list
# outer list is test-time augmentation and inner list
# is batch dimension
for aug_index in range(num_augs):
batch_imgs = []
batch_data_samples = []
for batch_index in range(batch_size):
single_img = data[batch_index]['img'][aug_index]
# to gpu and normalize
single_img = single_img.to(self.device)
if self.to_rgb and single_img[0].size(0) == 3:
single_img = single_img[[2, 1, 0], ...]
single_img = (single_img - self.pixel_mean) / self.pixel_std
batch_imgs.append(single_img)
batch_data_samples.append(
data[batch_index]['data_sample'][aug_index])
aug_batch_imgs.append(stack_batch(batch_imgs))
aug_batch_data_samples.append(batch_data_samples)
return aug_batch_imgs, aug_batch_data_samples
```
###Assistant : Process input data during training and testing phases.
Args:
data (list[dict]): The data to be processed, which
comes from the dataloader. The list indicates the batch dimension.
Each dict contains these keys:
- `img` (list[Tensor]): Image tensor with different test-time
augmentation.
- `data_sample` (list[:obj:`GeneralData`]): Meta information
and annotations under different test-time augmentation.
Returns:
tuple: It should contain 2 items.
- aug_batch_imgs (list[Tensor]): List of batch image
tensors. The list indicates the test-time augmentations.
Note that the batch size is always 1
when running the augtest.
- aug_batch_data_samples
(list[list[:obj:`GeneralData`]], Optional):
The Data Samples. It usually includes information such as
`gt_instance`. Returns None if the input data does not
contain `data_sample`. The outer list indicates the
number of augmentations and the inner list indicates the
batch dimension.
|
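A stripped-down sketch of the per-image preprocessing described above (BGR-to-RGB flip, mean/std normalization, batch stacking) is shown below; `torch.stack` stands in for the project's `stack_batch` helper and the mean/std values are illustrative assumptions:

```Python
# Simplified illustration of the normalization and stacking step.
import torch

pixel_mean = torch.tensor([123.675, 116.28, 103.53]).view(3, 1, 1)
pixel_std = torch.tensor([58.395, 57.12, 57.375]).view(3, 1, 1)

def preprocess_one(img: torch.Tensor, to_rgb: bool = True) -> torch.Tensor:
    if to_rgb and img.size(0) == 3:
        img = img[[2, 1, 0], ...]          # swap BGR channels to RGB
    return (img - pixel_mean) / pixel_std  # per-channel normalization

batch = [torch.rand(3, 64, 64) * 255 for _ in range(2)]
batch_tensor = torch.stack([preprocess_one(img) for img in batch])
print(batch_tensor.shape)  # torch.Size([2, 3, 64, 64])
```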
1,906 | def list_templates() -> List[pathlib.Path]:
return (pathlib.Path(__file__).parent / "templates").glob("*.html.j2")
| List the available HTML templates.
Returns:
List[pathlib.Path]: A list of files with .html.j2 extensions inside
./templates/
| 16 | 8 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def list_templates() -> List[pathlib.Path]:
return (pathlib.Path(__file__).parent / "templates").glob("*.html.j2")
```
###Assistant : List the available HTML templates.
Returns:
List[pathlib.Path]: A list of files with .html.j2 extensions inside
./templates/
|
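The row above is a one-liner around `pathlib.Path.glob`; a minimal stand-alone sketch of the same pattern (the directory name and extension mirror the documented behaviour, but the search is anchored at the working directory instead of the module file):

```Python
# Glob for Jinja HTML templates in a "templates" directory under `base`;
# a simplified stand-in for the documented helper.
import pathlib

def list_templates(base=pathlib.Path(".")):
    return sorted((base / "templates").glob("*.html.j2"))

print(list_templates())
```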
1,907 | def usable_pip_file(path): # type: (t.Optional[str]) -> bool
return bool(path) and os.path.exists(path) and bool(os.path.getsize(path))
# Cryptography
| Return True if the specified pip file is usable, otherwise False. | 11 | 15 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def usable_pip_file(path): # type: (t.Optional[str]) -> bool
return bool(path) and os.path.exists(path) and bool(os.path.getsize(path))
# Cryptography
```
###Assistant : Return True if the specified pip file is usable, otherwise False.
|
1,908 | def binary_accuracy(y_true, y_pred, threshold=0.5):
y_pred = tf.convert_to_tensor(y_pred)
threshold = tf.cast(threshold, y_pred.dtype)
y_pred = tf.cast(y_pred > threshold, y_pred.dtype)
return backend.mean(tf.equal(y_true, y_pred), axis=-1)
@keras_export('keras.metrics.categorical_accuracy')
@tf.__internal__.dispatch.add_dispatch_support
| Calculates how often predictions match binary labels.
Standalone usage:
>>> y_true = [[1], [1], [0], [0]]
>>> y_pred = [[1], [1], [0], [0]]
>>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)
>>> assert m.shape == (4,)
>>> m.numpy()
array([1., 1., 1., 1.], dtype=float32)
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
threshold: (Optional) Float representing the threshold for deciding whether
prediction values are 1 or 0.
Returns:
Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`
| 86 | 23 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def binary_accuracy(y_true, y_pred, threshold=0.5):
y_pred = tf.convert_to_tensor(y_pred)
threshold = tf.cast(threshold, y_pred.dtype)
y_pred = tf.cast(y_pred > threshold, y_pred.dtype)
return backend.mean(tf.equal(y_true, y_pred), axis=-1)
@keras_export('keras.metrics.categorical_accuracy')
@tf.__internal__.dispatch.add_dispatch_support
```
###Assistant : Calculates how often predictions match binary labels.
Standalone usage:
>>> y_true = [[1], [1], [0], [0]]
>>> y_pred = [[1], [1], [0], [0]]
>>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)
>>> assert m.shape == (4,)
>>> m.numpy()
array([1., 1., 1., 1.], dtype=float32)
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
threshold: (Optional) Float representing the threshold for deciding whether
prediction values are 1 or 0.
Returns:
Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`
|
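The same computation as the Keras `binary_accuracy` documented above can be sketched with NumPy alone, which makes the threshold-then-average logic explicit:

```Python
# NumPy sketch of binary accuracy: threshold the predictions, compare
# elementwise with the labels, and average over the last axis.
import numpy as np

def binary_accuracy_np(y_true, y_pred, threshold=0.5):
    y_pred = (np.asarray(y_pred) > threshold).astype(float)
    return np.mean(np.asarray(y_true, dtype=float) == y_pred, axis=-1)

y_true = [[1], [1], [0], [0]]
y_pred = [[0.9], [0.6], [0.4], [0.1]]
print(binary_accuracy_np(y_true, y_pred))  # [1. 1. 1. 1.]
```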
1,909 | def _object2proto(self) -> SyftOblvClient_PB:
return SyftOblvClient_PB(
token=self.token,
oblivious_user_id=self.oblivious_user_id,
cookies=self.cookies,
headers=self.headers,
timeout=self.timeout,
verify_ssl=self.verify_ssl,
)
| Returns a protobuf serialization of self.
As a requirement of all objects which inherit from Serializable,
this method transforms the current object into the corresponding
Protobuf object so that it can be further serialized.
:return: returns a protobuf object
:rtype: SyftOblvClient_PB
.. note::
This method is purely an internal method. Please use serialize(object) or one of
the other public serialization methods if you wish to serialize an
object.
| 68 | 13 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _object2proto(self) -> SyftOblvClient_PB:
return SyftOblvClient_PB(
token=self.token,
oblivious_user_id=self.oblivious_user_id,
cookies=self.cookies,
headers=self.headers,
timeout=self.timeout,
verify_ssl=self.verify_ssl,
)
```
###Assistant : Returns a protobuf serialization of self.
As a requirement of all objects which inherit from Serializable,
this method transforms the current object into the corresponding
Protobuf object so that it can be further serialized.
:return: returns a protobuf object
:rtype: SyftOblvClient_PB
.. note::
This method is purely an internal method. Please use serialize(object) or one of
the other public serialization methods if you wish to serialize an
object.
|
1,910 | def test_bulk_handle_digest_email_skips_deactivated_users(self) -> None:
realm = get_realm("zulip")
hamlet = self.example_user("hamlet")
user_ids = list(
UserProfile.objects.filter(is_bot=False, realm=realm).values_list("id", flat=True)
)
do_deactivate_user(hamlet, acting_user=None)
with mock.patch("zerver.lib.digest.enough_traffic", return_value=True), mock.patch(
"zerver.lib.digest.send_future_email"
) as mock_send_email:
bulk_handle_digest_email(user_ids, 1)
emailed_user_ids = [
call_args[1]["to_user_ids"][0] for call_args in mock_send_email.call_args_list
]
self.assertEqual(
set(emailed_user_ids), set(user_id for user_id in user_ids if user_id != hamlet.id)
)
|
A user id may be added to the queue before the user is deactivated. In such a case,
the function responsible for sending the email should correctly skip them.
| 29 | 50 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_bulk_handle_digest_email_skips_deactivated_users(self) -> None:
realm = get_realm("zulip")
hamlet = self.example_user("hamlet")
user_ids = list(
UserProfile.objects.filter(is_bot=False, realm=realm).values_list("id", flat=True)
)
do_deactivate_user(hamlet, acting_user=None)
with mock.patch("zerver.lib.digest.enough_traffic", return_value=True), mock.patch(
"zerver.lib.digest.send_future_email"
) as mock_send_email:
bulk_handle_digest_email(user_ids, 1)
emailed_user_ids = [
call_args[1]["to_user_ids"][0] for call_args in mock_send_email.call_args_list
]
self.assertEqual(
set(emailed_user_ids), set(user_id for user_id in user_ids if user_id != hamlet.id)
)
```
###Assistant :
A user id may be added to the queue before the user is deactivated. In such a case,
the function responsible for sending the email should correctly skip them.
|
1,911 | def test_driver_4():
args_list = [
'tests/tests.csv',
'-is', ',',
'-target', 'class',
'-g', '1',
'-p', '2',
'-cv', '3',
'-s', '42',
'-config', 'TPOT light',
'-v', '3'
]
args = _get_arg_parser().parse_args(args_list)
with captured_output() as (out, err):
tpot_driver(args)
ret_stdout = out.getvalue()
assert "TPOT settings" in ret_stdout
assert "Final Pareto front testing scores" in ret_stdout
try:
ret_val = float(ret_stdout.split('\n')[-2].split('\t')[1])
except Exception:
ret_val = -float('inf')
assert ret_val > 0.0
| Assert that the tpot_driver() in TPOT driver outputs normal result with verbosity = 3. | 14 | 62 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_driver_4():
args_list = [
'tests/tests.csv',
'-is', ',',
'-target', 'class',
'-g', '1',
'-p', '2',
'-cv', '3',
'-s', '42',
'-config', 'TPOT light',
'-v', '3'
]
args = _get_arg_parser().parse_args(args_list)
with captured_output() as (out, err):
tpot_driver(args)
ret_stdout = out.getvalue()
assert "TPOT settings" in ret_stdout
assert "Final Pareto front testing scores" in ret_stdout
try:
ret_val = float(ret_stdout.split('\n')[-2].split('\t')[1])
except Exception:
ret_val = -float('inf')
assert ret_val > 0.0
```
###Assistant : Assert that the tpot_driver() in TPOT driver outputs normal result with verbosity = 3.
|
1,912 | async def test_set_avatar(self) -> None:
handler = self.hs.get_sso_handler()
# Create a new user to set avatar for
reg_handler = self.hs.get_registration_handler()
user_id = self.get_success(reg_handler.register_user(approved=True))
self.assertTrue(
self.get_success(handler.set_avatar(user_id, "http://my.server/me.png"))
)
# Ensure avatar is set on this newly created user,
# so no need to compare for the exact image
profile_handler = self.hs.get_profile_handler()
profile = self.get_success(profile_handler.get_profile(user_id))
self.assertIsNot(profile["avatar_url"], None)
| Tests successfully setting the avatar of a newly created user | 10 | 55 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
async def test_set_avatar(self) -> None:
handler = self.hs.get_sso_handler()
# Create a new user to set avatar for
reg_handler = self.hs.get_registration_handler()
user_id = self.get_success(reg_handler.register_user(approved=True))
self.assertTrue(
self.get_success(handler.set_avatar(user_id, "http://my.server/me.png"))
)
# Ensure avatar is set on this newly created user,
# so no need to compare for the exact image
profile_handler = self.hs.get_profile_handler()
profile = self.get_success(profile_handler.get_profile(user_id))
self.assertIsNot(profile["avatar_url"], None)
```
###Assistant : Tests successfully setting the avatar of a newly created user
|
1,913 | def tune_decorated(api_key_file):
tuner = tune.Tuner(
decorated_train_function,
tune_config=tune.TuneConfig(
metric="loss",
mode="min",
),
param_space={
"mean": tune.grid_search([1, 2, 3, 4, 5]),
"sd": tune.uniform(0.2, 0.8),
"wandb": {"api_key_file": api_key_file, "project": "Wandb_example"},
},
)
tuner.fit()
| Example for using the @wandb_mixin decorator with the function API | 10 | 28 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def tune_decorated(api_key_file):
tuner = tune.Tuner(
decorated_train_function,
tune_config=tune.TuneConfig(
metric="loss",
mode="min",
),
param_space={
"mean": tune.grid_search([1, 2, 3, 4, 5]),
"sd": tune.uniform(0.2, 0.8),
"wandb": {"api_key_file": api_key_file, "project": "Wandb_example"},
},
)
tuner.fit()
```
###Assistant : Example for using the @wandb_mixin decorator with the function API
|
1,914 | def format_usage(self, usage):
# type: (str) -> str
msg = "\nUsage: {}\n".format(self.indent_lines(textwrap.dedent(usage), " "))
return msg
|
Ensure there is only one newline between usage and the first heading
if there is no description.
| 17 | 16 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def format_usage(self, usage):
# type: (str) -> str
msg = "\nUsage: {}\n".format(self.indent_lines(textwrap.dedent(usage), " "))
return msg
```
###Assistant :
Ensure there is only one newline between usage and the first heading
if there is no description.
|
1,915 | def from_index_summation(expr, first_index=None, last_index=None, dimensions=None):
r
from sympy.tensor.array.expressions.from_indexed_to_array import convert_indexed_to_array
from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix
first_indices = []
if first_index is not None:
first_indices.append(first_index)
if last_index is not None:
first_indices.append(last_index)
arr = convert_indexed_to_array(expr, first_indices=first_indices)
return convert_array_to_matrix(arr)
|
Parse expression of matrices with explicitly summed indices into a
matrix expression without indices, if possible.
This transformation expressed in mathematical notation:
`\sum_{j=0}^{N-1} A_{i,j} B_{j,k} \Longrightarrow \mathbf{A}\cdot \mathbf{B}`
Optional parameter ``first_index``: specify which free index to use as
the index starting the expression.
Examples
========
>>> from sympy import MatrixSymbol, MatrixExpr, Sum
>>> from sympy.abc import i, j, k, l, N
>>> A = MatrixSymbol("A", N, N)
>>> B = MatrixSymbol("B", N, N)
>>> expr = Sum(A[i, j]*B[j, k], (j, 0, N-1))
>>> MatrixExpr.from_index_summation(expr)
A*B
Transposition is detected:
>>> expr = Sum(A[j, i]*B[j, k], (j, 0, N-1))
>>> MatrixExpr.from_index_summation(expr)
A.T*B
Detect the trace:
>>> expr = Sum(A[i, i], (i, 0, N-1))
>>> MatrixExpr.from_index_summation(expr)
Trace(A)
More complicated expressions:
>>> expr = Sum(A[i, j]*B[k, j]*A[l, k], (j, 0, N-1), (k, 0, N-1))
>>> MatrixExpr.from_index_summation(expr)
A*B.T*A.T
| 133 | 35 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def from_index_summation(expr, first_index=None, last_index=None, dimensions=None):
r
from sympy.tensor.array.expressions.from_indexed_to_array import convert_indexed_to_array
from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix
first_indices = []
if first_index is not None:
first_indices.append(first_index)
if last_index is not None:
first_indices.append(last_index)
arr = convert_indexed_to_array(expr, first_indices=first_indices)
return convert_array_to_matrix(arr)
```
###Assistant :
Parse expression of matrices with explicitly summed indices into a
matrix expression without indices, if possible.
This transformation expressed in mathematical notation:
`\sum_{j=0}^{N-1} A_{i,j} B_{j,k} \Longrightarrow \mathbf{A}\cdot \mathbf{B}`
Optional parameter ``first_index``: specify which free index to use as
the index starting the expression.
Examples
========
>>> from sympy import MatrixSymbol, MatrixExpr, Sum
>>> from sympy.abc import i, j, k, l, N
>>> A = MatrixSymbol("A", N, N)
>>> B = MatrixSymbol("B", N, N)
>>> expr = Sum(A[i, j]*B[j, k], (j, 0, N-1))
>>> MatrixExpr.from_index_summation(expr)
A*B
Transposition is detected:
>>> expr = Sum(A[j, i]*B[j, k], (j, 0, N-1))
>>> MatrixExpr.from_index_summation(expr)
A.T*B
Detect the trace:
>>> expr = Sum(A[i, i], (i, 0, N-1))
>>> MatrixExpr.from_index_summation(expr)
Trace(A)
More complicated expressions:
>>> expr = Sum(A[i, j]*B[k, j]*A[l, k], (j, 0, N-1), (k, 0, N-1))
>>> MatrixExpr.from_index_summation(expr)
A*B.T*A.T
|
1,916 | def get_views(self):
q = f"SHOW FULL TABLES IN {self.database} WHERE TABLE_TYPE LIKE 'VIEW';"
result = self.native_query(q)
return result
|
Get more information about specific database views
| 7 | 18 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_views(self):
q = f"SHOW FULL TABLES IN {self.database} WHERE TABLE_TYPE LIKE 'VIEW';"
result = self.native_query(q)
return result
```
###Assistant :
Get more information about specific database views
|
1,917 | def _mac(model, obs, h):
B, n_agents = obs.size(0), obs.size(1)
if not isinstance(obs, dict):
obs = {"obs": obs}
obs_agents_as_batches = {k: _drop_agent_dim(v) for k, v in obs.items()}
h_flat = [s.reshape([B * n_agents, -1]) for s in h]
q_flat, h_flat = model(obs_agents_as_batches, h_flat, None)
return q_flat.reshape([B, n_agents, -1]), [
s.reshape([B, n_agents, -1]) for s in h_flat
]
| Forward pass of the multi-agent controller.
Args:
model: TorchModelV2 class
obs: Tensor of shape [B, n_agents, obs_size]
h: List of tensors of shape [B, n_agents, h_size]
Returns:
q_vals: Tensor of shape [B, n_agents, n_actions]
h: Tensor of shape [B, n_agents, h_size]
| 41 | 55 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _mac(model, obs, h):
B, n_agents = obs.size(0), obs.size(1)
if not isinstance(obs, dict):
obs = {"obs": obs}
obs_agents_as_batches = {k: _drop_agent_dim(v) for k, v in obs.items()}
h_flat = [s.reshape([B * n_agents, -1]) for s in h]
q_flat, h_flat = model(obs_agents_as_batches, h_flat, None)
return q_flat.reshape([B, n_agents, -1]), [
s.reshape([B, n_agents, -1]) for s in h_flat
]
```
###Assistant : Forward pass of the multi-agent controller.
Args:
model: TorchModelV2 class
obs: Tensor of shape [B, n_agents, obs_size]
h: List of tensors of shape [B, n_agents, h_size]
Returns:
q_vals: Tensor of shape [B, n_agents, n_actions]
h: Tensor of shape [B, n_agents, h_size]
|
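The key trick in `_mac` above is folding the agent dimension into the batch dimension before the model call and unfolding it afterwards; a shape-only sketch (B=4, n_agents=3, obs_size=10, n_actions=5 are illustrative assumptions, and a random tensor stands in for the model output):

```Python
# Shape round-trip used by the multi-agent controller.
import torch

B, n_agents, obs_size, n_actions = 4, 3, 10, 5
obs = torch.rand(B, n_agents, obs_size)

flat = obs.reshape(B * n_agents, obs_size)    # [12, 10] fed to the model
q_flat = torch.rand(B * n_agents, n_actions)  # stand-in for the model output
q_vals = q_flat.reshape(B, n_agents, -1)      # back to [4, 3, 5]
print(flat.shape, q_vals.shape)
```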
1,918 | def _get_count(self):
has_meta = all(val is not None for val in self._alignments.video_meta_data.values())
retval = len(self._alignments.video_meta_data["pts_time"]) if has_meta else None
logger.debug("Frame count from alignments file: (has_meta: %s, %s", has_meta, retval)
return retval
| If the alignments file has been run through the manual tool, then it will hold video
meta information, meaning that the count of frames in the alignment file can be relied
on to be accurate.
Returns
-------
int or ``None``
For video input which contain video meta-data in the alignments file then the count of
frames is returned. In all other cases ``None`` is returned
| 65 | 31 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _get_count(self):
has_meta = all(val is not None for val in self._alignments.video_meta_data.values())
retval = len(self._alignments.video_meta_data["pts_time"]) if has_meta else None
logger.debug("Frame count from alignments file: (has_meta: %s, %s", has_meta, retval)
return retval
```
###Assistant : If the alignments file has been run through the manual tool, then it will hold video
meta information, meaning that the count of frames in the alignment file can be relied
on to be accurate.
Returns
-------
int or ``None``
For video input which contain video meta-data in the alignments file then the count of
frames is returned. In all other cases ``None`` is returned
|
1,919 | def test_sparse1_with_non_sparse_components():
fit_then_transform(
sparse1_paratial_1h.todense(),
sparse1,
categorical_features=[True, False]
)
| Test fit_transform a sparse matrix with specifying categorical_features. | 8 | 8 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_sparse1_with_non_sparse_components():
fit_then_transform(
sparse1_paratial_1h.todense(),
sparse1,
categorical_features=[True, False]
)
```
###Assistant : Test fit_transform a sparse matrix with specifying categorical_features.
|
1,920 | def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch):
MNAME = "hi"
foutl = get_io_paths(hello_world_f90, mname=MNAME)
ipath = foutl.f90inp
monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME}'.split())
with util.switchdir(ipath.parent):
f2pycli()
# Always generate C module
assert Path.exists(foutl.cmodf)
# File contains a function, check for F77 wrappers
assert Path.exists(foutl.wrap77)
| Checks the generation of files based on a module name
CLI :: -m
| 13 | 41 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch):
MNAME = "hi"
foutl = get_io_paths(hello_world_f90, mname=MNAME)
ipath = foutl.f90inp
monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME}'.split())
with util.switchdir(ipath.parent):
f2pycli()
# Always generate C module
assert Path.exists(foutl.cmodf)
# File contains a function, check for F77 wrappers
assert Path.exists(foutl.wrap77)
```
###Assistant : Checks the generation of files based on a module name
CLI :: -m
|
1,921 | def delegate(args, host_state, exclude, require): # type: (CommonConfig, HostState, t.List[str], t.List[str]) -> None
assert isinstance(args, EnvironmentConfig)
with delegation_context(args, host_state):
if isinstance(args, TestConfig):
args.metadata.ci_provider = get_ci_provider().code
make_dirs(ResultType.TMP.path)
with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=ResultType.TMP.path) as metadata_fd:
args.metadata_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(metadata_fd.name))
args.metadata.to_file(args.metadata_path)
try:
delegate_command(args, host_state, exclude, require)
finally:
args.metadata_path = None
else:
delegate_command(args, host_state, exclude, require)
| Delegate execution of ansible-test to another environment. | 7 | 51 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def delegate(args, host_state, exclude, require): # type: (CommonConfig, HostState, t.List[str], t.List[str]) -> None
assert isinstance(args, EnvironmentConfig)
with delegation_context(args, host_state):
if isinstance(args, TestConfig):
args.metadata.ci_provider = get_ci_provider().code
make_dirs(ResultType.TMP.path)
with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=ResultType.TMP.path) as metadata_fd:
args.metadata_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(metadata_fd.name))
args.metadata.to_file(args.metadata_path)
try:
delegate_command(args, host_state, exclude, require)
finally:
args.metadata_path = None
else:
delegate_command(args, host_state, exclude, require)
```
###Assistant : Delegate execution of ansible-test to another environment.
|
1,922 | def require_torch_non_multi_gpu(test_case):
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
|
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
| 13 | 21 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def require_torch_non_multi_gpu(test_case):
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
```
###Assistant :
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
|
1,923 | def _ReturnKeyHandler(self, event):
# if the element is disabled, ignore the event
if self.Disabled:
return
MyForm = self.ParentForm
button_element = self._FindReturnKeyBoundButton(MyForm)
if button_element is not None:
button_element.ButtonCallBack()
|
Internal callback for the ENTER / RETURN key. Results in calling the ButtonCallBack for element that has the return key bound to it, just as if button was clicked.
:param event:
:type event:
| 33 | 27 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _ReturnKeyHandler(self, event):
# if the element is disabled, ignore the event
if self.Disabled:
return
MyForm = self.ParentForm
button_element = self._FindReturnKeyBoundButton(MyForm)
if button_element is not None:
button_element.ButtonCallBack()
```
###Assistant :
Internal callback for the ENTER / RETURN key. Results in calling the ButtonCallBack for element that has the return key bound to it, just as if button was clicked.
:param event:
:type event:
|
1,924 | def uint64_frame():
return DataFrame(
{"A": np.arange(3), "B": [2**63, 2**63 + 5, 2**63 + 10]}, dtype=np.uint64
)
@pytest.fixture
|
Fixture for DataFrame with uint64 values
Columns are ['A', 'B']
| 10 | 17 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def uint64_frame():
return DataFrame(
{"A": np.arange(3), "B": [2**63, 2**63 + 5, 2**63 + 10]}, dtype=np.uint64
)
@pytest.fixture
```
###Assistant :
Fixture for DataFrame with uint64 values
Columns are ['A', 'B']
|
1,925 | def test_null_annotation(self):
book = Book.objects.annotate(
no_value=Value(None, output_field=IntegerField())
).first()
self.assertIsNone(book.no_value)
|
Annotating None onto a model round-trips
| 6 | 9 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_null_annotation(self):
book = Book.objects.annotate(
no_value=Value(None, output_field=IntegerField())
).first()
self.assertIsNone(book.no_value)
```
###Assistant :
Annotating None onto a model round-trips
|
1,926 | def icosahedral_graph(create_using=None):
description = [
"adjacencylist",
"Platonic Icosahedral Graph",
12,
[
[2, 6, 8, 9, 12],
[3, 6, 7, 9],
[4, 7, 9, 10],
[5, 7, 10, 11],
[6, 7, 11, 12],
[7, 12],
[],
[9, 10, 11, 12],
[10],
[11],
[12],
[],
],
]
G = make_small_undirected_graph(description, create_using)
return G
|
Returns the Platonic Icosahedral graph.
The icosahedral graph has 12 nodes and 30 edges. It is a Platonic graph
whose nodes have the connectivity of the icosahedron. It is undirected,
regular and Hamiltonian [1]_.
Parameters
----------
create_using : NetworkX graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
Returns
-------
G : networkx Graph
Icosahedral graph with 12 nodes and 30 edges.
References
----------
.. [1] https://mathworld.wolfram.com/IcosahedralGraph.html
| 73 | 51 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def icosahedral_graph(create_using=None):
description = [
"adjacencylist",
"Platonic Icosahedral Graph",
12,
[
[2, 6, 8, 9, 12],
[3, 6, 7, 9],
[4, 7, 9, 10],
[5, 7, 10, 11],
[6, 7, 11, 12],
[7, 12],
[],
[9, 10, 11, 12],
[10],
[11],
[12],
[],
],
]
G = make_small_undirected_graph(description, create_using)
return G
```
###Assistant :
Returns the Platonic Icosahedral graph.
The icosahedral graph has 12 nodes and 30 edges. It is a Platonic graph
whose nodes have the connectivity of the icosahedron. It is undirected,
regular and Hamiltonian [1]_.
Parameters
----------
create_using : NetworkX graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
Returns
-------
G : networkx Graph
Icosahedral graph with 12 nodes and 30 edges.
References
----------
.. [1] https://mathworld.wolfram.com/IcosahedralGraph.html
|
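The generator documented above is also exposed through the NetworkX namespace, so the properties stated in the docstring are easy to check:

```Python
# Verify the node count, edge count and 5-regularity stated in the
# docstring using the public networkx API.
import networkx as nx

G = nx.icosahedral_graph()
print(G.number_of_nodes(), G.number_of_edges())  # 12 30
print(sorted({d for _, d in G.degree()}))        # [5]  -> 5-regular
```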
1,927 | def evaluate(model, criterion, metric, data_loader):
model.eval()
metric.reset()
losses = []
for batch in tqdm(data_loader):
input_ids, token_type_ids, position_ids, masks, ent_label, spo_label = batch
max_batch_len = input_ids.shape[-1]
ent_mask = paddle.unsqueeze(masks, axis=2)
spo_mask = paddle.matmul(ent_mask, ent_mask, transpose_y=True)
spo_mask = paddle.unsqueeze(spo_mask, axis=1)
logits = model(input_ids, token_type_ids, position_ids)
ent_loss = criterion(
logits[0], ent_label[0], weight=ent_mask, reduction='sum')
spo_loss = criterion(
logits[1], spo_label[0], weight=spo_mask, reduction='sum')
loss = ent_loss + spo_loss
losses.append(loss.numpy())
lengths = paddle.sum(masks, axis=-1)
correct = metric.compute(lengths, logits[0], logits[1], ent_label[1],
spo_label[1])
metric.update(correct)
results = metric.accumulate()
print('eval loss: %.5f, entity f1: %.5f, spo f1: %.5f' %
(np.mean(losses), results['entity'][2], results['spo'][2]))
model.train()
metric.reset()
|
Given a dataset, it evaluates the model and computes the metric.
Args:
model(obj:`paddle.nn.Layer`): A model to classify texts.
dataloader(obj:`paddle.io.DataLoader`): The dataset loader which generates batches.
criterion(`paddle.nn.functional`): It can compute the loss.
metric(obj:`paddle.metric.Metric`): The evaluation metric.
| 34 | 93 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def evaluate(model, criterion, metric, data_loader):
model.eval()
metric.reset()
losses = []
for batch in tqdm(data_loader):
input_ids, token_type_ids, position_ids, masks, ent_label, spo_label = batch
max_batch_len = input_ids.shape[-1]
ent_mask = paddle.unsqueeze(masks, axis=2)
spo_mask = paddle.matmul(ent_mask, ent_mask, transpose_y=True)
spo_mask = paddle.unsqueeze(spo_mask, axis=1)
logits = model(input_ids, token_type_ids, position_ids)
ent_loss = criterion(
logits[0], ent_label[0], weight=ent_mask, reduction='sum')
spo_loss = criterion(
logits[1], spo_label[0], weight=spo_mask, reduction='sum')
loss = ent_loss + spo_loss
losses.append(loss.numpy())
lengths = paddle.sum(masks, axis=-1)
correct = metric.compute(lengths, logits[0], logits[1], ent_label[1],
spo_label[1])
metric.update(correct)
results = metric.accumulate()
print('eval loss: %.5f, entity f1: %.5f, spo f1: %.5f' %
(np.mean(losses), results['entity'][2], results['spo'][2]))
model.train()
metric.reset()
```
###Assistant :
Given a dataset, it evaluates the model and computes the metric.
Args:
model(obj:`paddle.nn.Layer`): A model to classify texts.
dataloader(obj:`paddle.io.DataLoader`): The dataset loader which generates batches.
criterion(`paddle.nn.functional`): It can compute the loss.
metric(obj:`paddle.metric.Metric`): The evaluation metric.
|
1,928 | def get_fields(self, include_parents=True, include_hidden=False):
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(
include_parents=include_parents, include_hidden=include_hidden
)
|
Return a list of fields associated to the model. By default, include
forward and reverse fields, fields derived from inheritance, but not
hidden fields. The returned fields can be changed using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
| 53 | 16 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_fields(self, include_parents=True, include_hidden=False):
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(
include_parents=include_parents, include_hidden=include_hidden
)
```
###Assistant :
Return a list of fields associated to the model. By default, include
forward and reverse fields, fields derived from inheritance, but not
hidden fields. The returned fields can be changed using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
|
1,929 | def _key_to_file(self, session_key=None):
if session_key is None:
session_key = self._get_or_create_session_key()
# Make sure we're not vulnerable to directory traversal. Session keys
# should always be md5s, so they should never contain directory
# components.
if not set(session_key).issubset(VALID_KEY_CHARS):
raise InvalidSessionKey("Invalid characters in session key")
return os.path.join(self.storage_path, self.file_prefix + session_key)
|
Get the file associated with this session key.
| 8 | 48 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _key_to_file(self, session_key=None):
if session_key is None:
session_key = self._get_or_create_session_key()
# Make sure we're not vulnerable to directory traversal. Session keys
# should always be md5s, so they should never contain directory
# components.
if not set(session_key).issubset(VALID_KEY_CHARS):
raise InvalidSessionKey("Invalid characters in session key")
return os.path.join(self.storage_path, self.file_prefix + session_key)
```
###Assistant :
Get the file associated with this session key.
|
1,930 | def check_against_chunks(self, chunks):
# type: (Iterator[bytes]) -> None
gots = {}
for hash_name in self._allowed.keys():
try:
gots[hash_name] = hashlib.new(hash_name)
except (ValueError, TypeError):
raise InstallationError(f"Unknown hash name: {hash_name}")
for chunk in chunks:
for hash in gots.values():
hash.update(chunk)
for hash_name, got in gots.items():
if got.hexdigest() in self._allowed[hash_name]:
return
self._raise(gots)
| Check good hashes against ones built from iterable of chunks of
data.
Raise HashMismatch if none match.
| 17 | 47 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def check_against_chunks(self, chunks):
# type: (Iterator[bytes]) -> None
gots = {}
for hash_name in self._allowed.keys():
try:
gots[hash_name] = hashlib.new(hash_name)
except (ValueError, TypeError):
raise InstallationError(f"Unknown hash name: {hash_name}")
for chunk in chunks:
for hash in gots.values():
hash.update(chunk)
for hash_name, got in gots.items():
if got.hexdigest() in self._allowed[hash_name]:
return
self._raise(gots)
```
###Assistant : Check good hashes against ones built from iterable of chunks of
data.
Raise HashMismatch if none match.
|
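The chunked hashing pattern used by `check_against_chunks` above can be sketched with `hashlib` alone; the chunks and the single-algorithm allow-list below are illustrative assumptions:

```Python
# Feed an iterator of byte chunks into hashlib incrementally, then compare
# the final hex digest against an allow-list.
import hashlib

def matches_allowed(chunks, allowed_sha256):
    h = hashlib.new("sha256")
    for chunk in chunks:
        h.update(chunk)  # hash incrementally, chunk by chunk
    return h.hexdigest() in allowed_sha256

chunks = [b"hello ", b"world"]
allowed = {hashlib.sha256(b"hello world").hexdigest()}
print(matches_allowed(iter(chunks), allowed))  # True
```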
1,931 | def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(
valid_mask & (labels < label_channels), as_tuple=False)
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
valid_mask = valid_mask.view(-1, 1).expand(labels.size(0),
label_channels).float()
if label_weights is None:
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels)
bin_label_weights *= valid_mask
return bin_labels, bin_label_weights, valid_mask
| Expand onehot labels to match the size of prediction. | 9 | 61 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(
valid_mask & (labels < label_channels), as_tuple=False)
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
valid_mask = valid_mask.view(-1, 1).expand(labels.size(0),
label_channels).float()
if label_weights is None:
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels)
bin_label_weights *= valid_mask
return bin_labels, bin_label_weights, valid_mask
```
###Assistant : Expand onehot labels to match the size of prediction.
|
1,932 | def test_sequence_input_types(self, input_type):
if not tf.executing_eagerly():
self.skipTest("Improved checking is only present in data_adapter.")
xy_function, x_function = self._make_sequence_input_functions(
input_type
)
fit_kwargs, evaluate_kwargs, predict_kwargs = {}, {}, {}
if input_type == "generator":
fit_kwargs["steps_per_epoch"] = 4
evaluate_kwargs["steps"] = 4
predict_kwargs["steps"] = 4
model = test_utils.get_small_mlp(1, 1, 1)
model.compile(
loss="mse",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(xy_function(use_namedtuple=False), **fit_kwargs)
model.evaluate(xy_function(use_namedtuple=False), **evaluate_kwargs)
model.predict(x_function(use_namedtuple=False), **predict_kwargs)
| Ensure that namedtuples and tuples are plumbed identically. | 8 | 55 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_sequence_input_types(self, input_type):
if not tf.executing_eagerly():
self.skipTest("Improved checking is only present in data_adapter.")
xy_function, x_function = self._make_sequence_input_functions(
input_type
)
fit_kwargs, evaluate_kwargs, predict_kwargs = {}, {}, {}
if input_type == "generator":
fit_kwargs["steps_per_epoch"] = 4
evaluate_kwargs["steps"] = 4
predict_kwargs["steps"] = 4
model = test_utils.get_small_mlp(1, 1, 1)
model.compile(
loss="mse",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(xy_function(use_namedtuple=False), **fit_kwargs)
model.evaluate(xy_function(use_namedtuple=False), **evaluate_kwargs)
model.predict(x_function(use_namedtuple=False), **predict_kwargs)
```
###Assistant : Ensure that namedtuples and tuples are plumbed identically.
|
1,933 | def get_stock_value_on(warehouse=None, posting_date=None, item_code=None):
if not posting_date:
posting_date = nowdate()
values, condition = [posting_date], ""
if warehouse:
lft, rgt, is_group = frappe.db.get_value("Warehouse", warehouse, ["lft", "rgt", "is_group"])
if is_group:
values.extend([lft, rgt])
condition += "and exists (\
select name from `tabWarehouse` wh where wh.name = sle.warehouse\
and wh.lft >= %s and wh.rgt <= %s)"
else:
values.append(warehouse)
condition += " AND warehouse = %s"
if item_code:
values.append(item_code)
condition += " AND item_code = %s"
stock_ledger_entries = frappe.db.sql(
.format(
condition
),
values,
as_dict=1,
)
sle_map = {}
for sle in stock_ledger_entries:
if not (sle.item_code, sle.warehouse) in sle_map:
sle_map[(sle.item_code, sle.warehouse)] = flt(sle.stock_value)
return sum(sle_map.values())
@frappe.whitelist()
|
SELECT item_code, stock_value, name, warehouse
FROM `tabStock Ledger Entry` sle
WHERE posting_date <= %s {0}
and is_cancelled = 0
ORDER BY timestamp(posting_date, posting_time) DESC, creation DESC
| 26 | 100 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_stock_value_on(warehouse=None, posting_date=None, item_code=None):
if not posting_date:
posting_date = nowdate()
values, condition = [posting_date], ""
if warehouse:
lft, rgt, is_group = frappe.db.get_value("Warehouse", warehouse, ["lft", "rgt", "is_group"])
if is_group:
values.extend([lft, rgt])
condition += "and exists (\
select name from `tabWarehouse` wh where wh.name = sle.warehouse\
and wh.lft >= %s and wh.rgt <= %s)"
else:
values.append(warehouse)
condition += " AND warehouse = %s"
if item_code:
values.append(item_code)
condition += " AND item_code = %s"
stock_ledger_entries = frappe.db.sql(
.format(
condition
),
values,
as_dict=1,
)
sle_map = {}
for sle in stock_ledger_entries:
if not (sle.item_code, sle.warehouse) in sle_map:
sle_map[(sle.item_code, sle.warehouse)] = flt(sle.stock_value)
return sum(sle_map.values())
@frappe.whitelist()
```
###Assistant :
SELECT item_code, stock_value, name, warehouse
FROM `tabStock Ledger Entry` sle
WHERE posting_date <= %s {0}
and is_cancelled = 0
ORDER BY timestamp(posting_date, posting_time) DESC, creation DESC
|
1,934 | def _is_installed_rpm(name):
log.debug(f"_is_installed_rpm '{name}'")
cmd = ["/usr/bin/rpm", "-q", name]
return __salt__["cmd.retcode"](cmd) == 0
|
Returns True if the rpm package is installed. Otherwise returns False.
| 11 | 13 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _is_installed_rpm(name):
log.debug(f"_is_installed_rpm '{name}'")
cmd = ["/usr/bin/rpm", "-q", name]
return __salt__["cmd.retcode"](cmd) == 0
```
###Assistant :
Returns True if the rpm package is installed. Otherwise returns False.
|
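Outside of Salt, the same presence check can be written with the standard library; `rpm -q` exits 0 only when the package is installed (running this requires an RPM-based system):

```Python
# Standard-library version of the "is this RPM installed?" check.
import subprocess

def is_installed_rpm(name: str) -> bool:
    result = subprocess.run(
        ["/usr/bin/rpm", "-q", name],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    return result.returncode == 0

print(is_installed_rpm("bash"))
```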
1,935 | def iter_tree_files(root, on_error=None, follow_links=None):
if on_error is not None and not callable(on_error):
raise TypeError("on_error:{!r} is not callable.".format(on_error))
if follow_links is None:
follow_links = True
for entry in _iter_tree_entries_next(os.path.abspath(root), '', {}, on_error, follow_links):
if not entry.is_dir(follow_links):
yield entry.path
# Alias `iter_tree_files()` as `iter_tree()`.
iter_tree = iter_tree_files
|
Walks the specified directory for all files.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. It will be
called with the exception (:exc:`OSError`). Reraise the exception to
abort the walk. Default is :data:`None` to ignore file-system
exceptions.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolic links that resolve to directories. Default is
:data:`None` for :data:`True`.
Raises :exc:`RecursionError` if recursion is detected.
Returns an :class:`~collections.abc.Iterable` yielding the path to
each file (:class:`str`) relative to *root*.
| 90 | 45 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def iter_tree_files(root, on_error=None, follow_links=None):
if on_error is not None and not callable(on_error):
raise TypeError("on_error:{!r} is not callable.".format(on_error))
if follow_links is None:
follow_links = True
for entry in _iter_tree_entries_next(os.path.abspath(root), '', {}, on_error, follow_links):
if not entry.is_dir(follow_links):
yield entry.path
# Alias `iter_tree_files()` as `iter_tree()`.
iter_tree = iter_tree_files
```
###Assistant :
Walks the specified directory for all files.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. It will be
called with the exception (:exc:`OSError`). Reraise the exception to
abort the walk. Default is :data:`None` to ignore file-system
exceptions.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolic links that resolve to directories. Default is
:data:`None` for :data:`True`.
Raises :exc:`RecursionError` if recursion is detected.
Returns an :class:`~collections.abc.Iterable` yielding the path to
each file (:class:`str`) relative to *root*.
|
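A much smaller stand-in for `iter_tree_files` above can be written with `os.walk`; it yields file paths relative to the root but omits the error-handler and symlink-cycle detection of the real implementation:

```Python
# Simplified tree walk: yield every file path relative to `root`.
import os

def iter_files(root):
    root = os.path.abspath(root)
    for dirpath, _dirnames, filenames in os.walk(root, followlinks=True):
        for name in filenames:
            yield os.path.relpath(os.path.join(dirpath, name), root)

for path in iter_files("."):
    print(path)
```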
1,936 | def _get_url_from_path(path, name):
# type: (str, str) -> Optional[str]
if _looks_like_path(name) and os.path.isdir(path):
if is_installable_dir(path):
return path_to_url(path)
raise InstallationError(
f"Directory {name!r} is not installable. Neither 'setup.py' "
"nor 'pyproject.toml' found."
)
if not is_archive_file(path):
return None
if os.path.isfile(path):
return path_to_url(path)
urlreq_parts = name.split('@', 1)
if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]):
# If the path contains '@' and the part before it does not look
# like a path, try to treat it as a PEP 440 URL req instead.
return None
logger.warning(
'Requirement %r looks like a filename, but the '
'file does not exist',
name
)
return path_to_url(path)
|
First, it checks whether a provided path is an installable directory
(e.g. it has a setup.py). If it is, returns the path.
If false, check if the path is an archive file (such as a .whl).
The function checks if the path is a file. If false, if the path has
an @, it will treat it as a PEP 440 URL requirement and return the path.
| 67 | 100 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _get_url_from_path(path, name):
# type: (str, str) -> Optional[str]
if _looks_like_path(name) and os.path.isdir(path):
if is_installable_dir(path):
return path_to_url(path)
raise InstallationError(
f"Directory {name!r} is not installable. Neither 'setup.py' "
"nor 'pyproject.toml' found."
)
if not is_archive_file(path):
return None
if os.path.isfile(path):
return path_to_url(path)
urlreq_parts = name.split('@', 1)
if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]):
# If the path contains '@' and the part before it does not look
# like a path, try to treat it as a PEP 440 URL req instead.
return None
logger.warning(
'Requirement %r looks like a filename, but the '
'file does not exist',
name
)
return path_to_url(path)
```
###Assistant :
First, it checks whether a provided path is an installable directory
(e.g. it has a setup.py). If it is, returns the path.
If false, check if the path is an archive file (such as a .whl).
The function checks if the path is a file. If false, if the path has
an @, it will treat it as a PEP 440 URL requirement and return the path.
|
1,937 | def test_install_fileset_with_bff_extension():
installp_call = MagicMock(return_value={"retcode": 0, "stdout": ""})
fileset_pkg_name = (
"/cecc/repos/aix72/TL3/BASE/installp/ppc/bos.rte.printers_7.2.2.0.bff"
)
list_pkgs_mock = MagicMock(
side_effect=[{"bos.rte.printers": "7.1.6.0"}, {"bos.rte.printers": "7.2.4.0"}]
)
with patch("pathlib.Path.is_file", return_value=True):
with patch.dict(
aixpkg.__salt__,
{"cmd.run_all": installp_call, "config.get": MagicMock(return_value=False)},
), patch.object(aixpkg, "list_pkgs", list_pkgs_mock):
result = aixpkg.install(fileset_pkg_name)
assert installp_call.call_count == 1
installp_call.assert_any_call(
"/usr/sbin/installp -acYXg -d /cecc/repos/aix72/TL3/BASE/installp/ppc bos.rte.printers_7.2.2.0.bff",
python_shell=False,
)
expected = {"bos.rte.printers": {"old": "7.1.6.0", "new": "7.2.4.0"}}
assert result == expected
|
Test install of fileset with bff extension
| 7 | 61 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_install_fileset_with_bff_extension():
installp_call = MagicMock(return_value={"retcode": 0, "stdout": ""})
fileset_pkg_name = (
"/cecc/repos/aix72/TL3/BASE/installp/ppc/bos.rte.printers_7.2.2.0.bff"
)
list_pkgs_mock = MagicMock(
side_effect=[{"bos.rte.printers": "7.1.6.0"}, {"bos.rte.printers": "7.2.4.0"}]
)
with patch("pathlib.Path.is_file", return_value=True):
with patch.dict(
aixpkg.__salt__,
{"cmd.run_all": installp_call, "config.get": MagicMock(return_value=False)},
), patch.object(aixpkg, "list_pkgs", list_pkgs_mock):
result = aixpkg.install(fileset_pkg_name)
assert installp_call.call_count == 1
installp_call.assert_any_call(
"/usr/sbin/installp -acYXg -d /cecc/repos/aix72/TL3/BASE/installp/ppc bos.rte.printers_7.2.2.0.bff",
python_shell=False,
)
expected = {"bos.rte.printers": {"old": "7.1.6.0", "new": "7.2.4.0"}}
assert result == expected
```
###Assistant :
Test install of fileset with bff extension
|
1,938 | def is_user_in_allow_list(block_list):
return frappe.session.user in frappe.db.sql_list(
,
block_list,
)
| select allow_user
from `tabLeave Block List Allow` where parent=%s | 9 | 9 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def is_user_in_allow_list(block_list):
return frappe.session.user in frappe.db.sql_list(
,
block_list,
)
```
###Assistant : select allow_user
from `tabLeave Block List Allow` where parent=%s
|
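As a rough illustration of what `frappe.db.sql_list` does with the parameterized query shown above — run the query and return the first column of every matching row as a flat list — here is a minimal sqlite3 equivalent; the table, data and helper are invented for the sketch.

```python
import sqlite3


def sql_list(conn: sqlite3.Connection, query: str, params: tuple) -> list:
    # Minimal stand-in for frappe.db.sql_list: first column of each row.
    return [row[0] for row in conn.execute(query, params)]


conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE allow (allow_user TEXT, parent TEXT)")
conn.executemany(
    "INSERT INTO allow VALUES (?, ?)",
    [("alice@example.com", "Holiday Block"), ("bob@example.com", "Holiday Block")],
)

current_user = "alice@example.com"
allowed = sql_list(
    conn, "SELECT allow_user FROM allow WHERE parent = ?", ("Holiday Block",)
)
print(current_user in allowed)  # True
```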
1,939 | def test_normalization(push_channel):
types = {
"list": list,
}
msgs = [
{"list": tuple([1, 2, 3])},
]
for msg in msgs:
ret = push_channel.send(msg, timeout=5, tries=1)
for key, value in ret["load"].items():
assert types[key] == type(value)
|
Since we use msgpack, we need to test that list types are converted to lists
| 15 | 34 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_normalization(push_channel):
types = {
"list": list,
}
msgs = [
{"list": tuple([1, 2, 3])},
]
for msg in msgs:
ret = push_channel.send(msg, timeout=5, tries=1)
for key, value in ret["load"].items():
assert types[key] == type(value)
```
###Assistant :
Since we use msgpack, we need to test that list types are converted to lists
|
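A quick round trip with the `msgpack` package (assumed to be installed) shows the behaviour this test guards against: msgpack has no tuple type, so tuples come back as lists.

```python
import msgpack

payload = {"list": (1, 2, 3)}                      # tuple on the way in
packed = msgpack.packb(payload, use_bin_type=True)
unpacked = msgpack.unpackb(packed, raw=False)

print(type(payload["list"]).__name__)   # tuple
print(type(unpacked["list"]).__name__)  # list -- msgpack has no tuple type
print(unpacked["list"] == [1, 2, 3])    # True
```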
1,940 | def to_perioddelta(self, freq) -> TimedeltaArray:
        # Deprecation GH#34853
warnings.warn(
"to_perioddelta is deprecated and will be removed in a "
"future version. "
"Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.",
FutureWarning,
# stacklevel chosen to be correct for when called from DatetimeIndex
stacklevel=find_stack_level(inspect.currentframe()),
)
from pandas.core.arrays.timedeltas import TimedeltaArray
if self._ndarray.dtype != "M8[ns]":
raise NotImplementedError("Only supported for nanosecond resolution.")
i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8
m8delta = i8delta.view("m8[ns]")
return TimedeltaArray(m8delta)
# -----------------------------------------------------------------
# Properties - Vectorized Timestamp Properties/Methods
|
Calculate deltas between self values and self converted to Periods at a freq.
Used for vectorized offsets.
Parameters
----------
freq : Period frequency
Returns
-------
TimedeltaArray/Index
| 26 | 73 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def to_perioddelta(self, freq) -> TimedeltaArray:
        # Deprecation GH#34853
warnings.warn(
"to_perioddelta is deprecated and will be removed in a "
"future version. "
"Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.",
FutureWarning,
# stacklevel chosen to be correct for when called from DatetimeIndex
stacklevel=find_stack_level(inspect.currentframe()),
)
from pandas.core.arrays.timedeltas import TimedeltaArray
if self._ndarray.dtype != "M8[ns]":
raise NotImplementedError("Only supported for nanosecond resolution.")
i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8
m8delta = i8delta.view("m8[ns]")
return TimedeltaArray(m8delta)
# -----------------------------------------------------------------
# Properties - Vectorized Timestamp Properties/Methods
```
###Assistant :
Calculate deltas between self values and self converted to Periods at a freq.
Used for vectorized offsets.
Parameters
----------
freq : Period frequency
Returns
-------
TimedeltaArray/Index
|
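The deprecation warning above names a direct replacement; a short sketch of that recommended expression, assuming an ordinary nanosecond `DatetimeIndex`:

```python
import pandas as pd

dtindex = pd.DatetimeIndex(
    ["2023-01-01 06:30", "2023-01-01 18:30", "2023-01-02 06:30"]
)

# Recommended replacement for the deprecated to_perioddelta(freq): the offset
# of each timestamp from the start of its containing period.
deltas = dtindex - dtindex.to_period("D").to_timestamp()
print(deltas)  # TimedeltaIndex of 06:30:00, 18:30:00 and 06:30:00 offsets
```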
1,941 | def set(self, components):
if len(components) > 0:
self.__components = components
else:
raise Exception("please give any vector")
|
input: new components
changes the components of the vector.
replaces the current components with the new ones.
| 15 | 16 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def set(self, components):
if len(components) > 0:
self.__components = components
else:
raise Exception("please give any vector")
```
###Assistant :
input: new components
changes the components of the vector.
replaces the current components with the new ones.
|
1,942 | def get_best_result(self) -> Optional[Tuple[Union[int, str], Module, Dict[str, Dict[str, Tensor]], Optional[float], List[Dict]]]:
if self._best_task_id is not None:
compact_model = torch.load(Path(self._log_dir_root, 'best_result', 'model.pth'))
compact_model_masks = torch.load(Path(self._log_dir_root, 'best_result', 'masks.pth'))
with Path(self._log_dir_root, 'best_result', 'config_list.json').open('r') as f:
config_list = json_tricks.load(f)
return self._best_task_id, compact_model, compact_model_masks, self._best_score, config_list
return None
|
Returns
-------
Optional[Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]]
If self._best_task_id is not None,
return best task id, best compact model, masks on the compact model, score, config list used in this task.
| 33 | 43 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_best_result(self) -> Optional[Tuple[Union[int, str], Module, Dict[str, Dict[str, Tensor]], Optional[float], List[Dict]]]:
if self._best_task_id is not None:
compact_model = torch.load(Path(self._log_dir_root, 'best_result', 'model.pth'))
compact_model_masks = torch.load(Path(self._log_dir_root, 'best_result', 'masks.pth'))
with Path(self._log_dir_root, 'best_result', 'config_list.json').open('r') as f:
config_list = json_tricks.load(f)
return self._best_task_id, compact_model, compact_model_masks, self._best_score, config_list
return None
```
###Assistant :
Returns
-------
Optional[Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]]
If self._best_task_id is not None,
return best task id, best compact model, masks on the compact model, score, config list used in this task.
|
1,943 | def set_variation_by_axes(self, axes):
try:
self.font.setvaraxes(axes)
except AttributeError as e:
msg = "FreeType 2.9.1 or greater is required"
raise NotImplementedError(msg) from e
|
:param axes: A list of values for each axis.
:exception OSError: If the font is not a variation font.
| 19 | 21 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def set_variation_by_axes(self, axes):
try:
self.font.setvaraxes(axes)
except AttributeError as e:
msg = "FreeType 2.9.1 or greater is required"
raise NotImplementedError(msg) from e
```
###Assistant :
:param axes: A list of values for each axis.
:exception OSError: If the font is not a variation font.
|
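A hedged usage sketch of the surrounding Pillow API: the font path below is hypothetical, and the calls require a variable (OpenType variation) font on disk plus FreeType 2.9.1 or newer.

```python
from PIL import ImageFont

# Hypothetical path to a variable font file with a single weight axis.
font = ImageFont.truetype("SomeVariableFont.ttf", size=36)

print(font.get_variation_axes())    # axis names with minimum/default/maximum
font.set_variation_by_axes([700])   # one value per axis, in axis order
```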
1,944 | def dispatch_line(self, frame):
if self.stop_here(frame) or self.break_here(frame):
self.user_line(frame)
if self.quitting: raise BdbQuit
return self.trace_dispatch
| Invoke user function and return trace function for line event.
If the debugger stops on the current line, invoke
self.user_line(). Raise BdbQuit if self.quitting is set.
Return self.trace_dispatch to continue tracing in this scope.
| 34 | 14 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def dispatch_line(self, frame):
if self.stop_here(frame) or self.break_here(frame):
self.user_line(frame)
if self.quitting: raise BdbQuit
return self.trace_dispatch
```
###Assistant : Invoke user function and return trace function for line event.
If the debugger stops on the current line, invoke
self.user_line(). Raise BdbQuit if self.quitting is set.
Return self.trace_dispatch to continue tracing in this scope.
|
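A minimal sketch of how `dispatch_line` typically comes into play: a `bdb.Bdb` subclass overrides `user_line`, and every stop reported by `dispatch_line` lands there. The traced code string below is illustrative.

```python
import bdb


class LineTracer(bdb.Bdb):
    # user_line() is invoked by dispatch_line() each time the debugger stops.
    def user_line(self, frame):
        print(f"stopped at {frame.f_code.co_filename}:{frame.f_lineno}")
        self.set_step()  # stop again on the next line


code = """
x = 1
y = x + 1
z = x + y
"""

LineTracer().run(code)  # prints one "stopped at" line per executed statement
```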
1,945 | def clear(self) -> None:
self._in_blocks.clear()
self._snapshot_blocks = None
self._snapshot_stats = None
# We're erasing the snapshot, so put all stages into the "after snapshot"
# bucket.
self._stages_after_snapshot = (
self._stages_before_snapshot + self._stages_after_snapshot
)
self._stages_before_snapshot = []
| Clear all cached block references of this plan, including input blocks.
This will render the plan un-executable unless the root is a LazyBlockList. | 23 | 36 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def clear(self) -> None:
self._in_blocks.clear()
self._snapshot_blocks = None
self._snapshot_stats = None
# We're erasing the snapshot, so put all stages into the "after snapshot"
# bucket.
self._stages_after_snapshot = (
self._stages_before_snapshot + self._stages_after_snapshot
)
self._stages_before_snapshot = []
```
###Assistant : Clear all cached block references of this plan, including input blocks.
This will render the plan un-executable unless the root is a LazyBlockList.
|
1,946 | def _global_clustering(self, X=None):
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, Integral):
clusterer = AgglomerativeClustering(n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by BIRCH is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters),
ConvergenceWarning,
)
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_)
if compute_labels:
self.labels_ = self._predict(X)
|
Global clustering for the subclusters obtained after fitting
| 8 | 131 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _global_clustering(self, X=None):
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, Integral):
clusterer = AgglomerativeClustering(n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by BIRCH is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters),
ConvergenceWarning,
)
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_)
if compute_labels:
self.labels_ = self._predict(X)
```
###Assistant :
Global clustering for the subclusters obtained after fitting
|
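A short usage sketch of `Birch` showing when the global clustering step above runs: an integer `n_clusters` clusters the leaf subcluster centroids (with `AgglomerativeClustering` by default), whereas `n_clusters=None` would skip that step. The toy data is invented.

```python
import numpy as np
from sklearn.cluster import Birch

rng = np.random.default_rng(0)
# Two well-separated blobs in 2D.
X = np.vstack([rng.normal(0.0, 0.3, (50, 2)), rng.normal(5.0, 0.3, (50, 2))])

model = Birch(threshold=0.5, n_clusters=2).fit(X)
print(len(model.subcluster_centers_))  # number of leaf subclusters found
print(np.unique(model.labels_))        # [0 1] after the global step
```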
1,947 | def test_prefill_form_backcompat(extras, expected):
mock_form = mock.Mock()
mock_form.data = {"conn_id": "test", "extra": json.dumps(extras), "conn_type": "test"}
cmv = ConnectionModelView()
cmv.extra_fields = ['extra__test__my_param']
# this is set by `lazy_add_provider_discovered_options_to_connection_form`
cmv.extra_field_name_mapping['extra__test__my_param'] = 'my_param'
cmv.prefill_form(form=mock_form, pk=1)
assert mock_form.extra__test__my_param.data == expected
@pytest.mark.parametrize('field_name', ['extra__test__custom_field', 'custom_field'])
@mock.patch('airflow.utils.module_loading.import_string')
@mock.patch('airflow.providers_manager.ProvidersManager.hooks', new_callable=PropertyMock) |
When populating custom fields in the connection form we should first check for the non-prefixed
value (since prefixes in extra are deprecated) and then fallback to the prefixed value.
Either way, the field is known internally to the model view as the prefixed value.
| 44 | 41 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_prefill_form_backcompat(extras, expected):
mock_form = mock.Mock()
mock_form.data = {"conn_id": "test", "extra": json.dumps(extras), "conn_type": "test"}
cmv = ConnectionModelView()
cmv.extra_fields = ['extra__test__my_param']
# this is set by `lazy_add_provider_discovered_options_to_connection_form`
cmv.extra_field_name_mapping['extra__test__my_param'] = 'my_param'
cmv.prefill_form(form=mock_form, pk=1)
assert mock_form.extra__test__my_param.data == expected
@pytest.mark.parametrize('field_name', ['extra__test__custom_field', 'custom_field'])
@mock.patch('airflow.utils.module_loading.import_string')
@mock.patch('airflow.providers_manager.ProvidersManager.hooks', new_callable=PropertyMock)
```
###Assistant :
When populating custom fields in the connection form we should first check for the non-prefixed
value (since prefixes in extra are deprecated) and then fallback to the prefixed value.
Either way, the field is known internally to the model view as the prefixed value.
|
1,948 | def tokenize_query(query):
result = defaultdict(list)
query_params = defaultdict(list)
tokens = split_query_into_tokens(query)
for token in tokens:
if token.upper() in ["OR", "AND"] or token.strip("()") == "":
continue
state = "query"
for idx, char in enumerate(token):
next_char = token[idx + 1] if idx < len(token) - 1 else None
if idx == 0 and char in ('"', "'", ":"):
break
if char == ":":
if next_char in (":", " "):
state = "query"
else:
state = "tags"
break
query_params[state].append(token)
if "query" in query_params:
result["query"] = [format_query(query) for query in query_params["query"]]
for tag in query_params["tags"]:
key, value = format_tag(tag)
result[key].append(value)
return dict(result)
|
Tokenizes a standard Sentry search query.
Example:
>>> query = 'is:resolved foo bar tag:value'
>>> tokenize_query(query)
{
'is': ['resolved'],
'query': ['foo', 'bar'],
'tag': ['value'],
}
Has a companion implementation in static/app/utils/tokenizeSearch.tsx
| 31 | 98 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def tokenize_query(query):
result = defaultdict(list)
query_params = defaultdict(list)
tokens = split_query_into_tokens(query)
for token in tokens:
if token.upper() in ["OR", "AND"] or token.strip("()") == "":
continue
state = "query"
for idx, char in enumerate(token):
next_char = token[idx + 1] if idx < len(token) - 1 else None
if idx == 0 and char in ('"', "'", ":"):
break
if char == ":":
if next_char in (":", " "):
state = "query"
else:
state = "tags"
break
query_params[state].append(token)
if "query" in query_params:
result["query"] = [format_query(query) for query in query_params["query"]]
for tag in query_params["tags"]:
key, value = format_tag(tag)
result[key].append(value)
return dict(result)
```
###Assistant :
Tokenizes a standard Sentry search query.
Example:
>>> query = 'is:resolved foo bar tag:value'
>>> tokenize_query(query)
{
'is': ['resolved'],
'query': ['foo', 'bar'],
'tag': ['value'],
}
Has a companion implementation in static/app/utils/tokenizeSearch.tsx
|
1,949 | def save_model(model, filepath, overwrite=True, save_format=None, **kwargs):
save_format = get_save_format(filepath, save_format)
if save_format not in ("keras", "tf", "h5", "keras_v3"):
raise ValueError(
"Unknown `save_format` argument. Expected one of "
"'keras', 'tf', or 'h5'. "
f"Received: save_format{save_format}"
)
if save_format == "keras_v3" or (
saving_lib.saving_v3_enabled() and save_format == "keras"
):
# If file exists and should not be overwritten.
try:
exists = os.path.exists(filepath)
except TypeError:
exists = False
if exists and not overwrite:
proceed = io_utils.ask_to_proceed_with_overwrite(filepath)
if not proceed:
return
if kwargs:
raise ValueError(
"The following argument(s) are not supported "
f"with the native Keras format: {list(kwargs.keys())}"
)
saving_lib.save_model(model, filepath)
else:
# Legacy case
return legacy_sm_saving_lib.save_model(
model,
filepath,
overwrite=overwrite,
save_format=save_format,
**kwargs,
)
@keras_export("keras.models.load_model") | Saves a model as a TensorFlow SavedModel or HDF5 file.
See the [Serialization and Saving guide](
https://keras.io/guides/serialization_and_saving/) for details.
Args:
model: Keras model instance to be saved.
filepath: `str` or `pathlib.Path` object. Path where to save the model.
overwrite: Whether we should overwrite any existing model at the target
location, or instead ask the user via an interactive prompt.
save_format: Either `"keras"`, `"tf"`, `"h5"`,
indicating whether to save the model
in the native Keras format (`.keras`),
in the TensorFlow SavedModel format (referred to as "SavedModel"
below), or in the legacy HDF5 format (`.h5`).
Defaults to `"tf"` in TF 2.X, and `"h5"` in TF 1.X.
SavedModel format arguments:
include_optimizer: Only applied to SavedModel and legacy HDF5 formats.
If False, do not save the optimizer state. Defaults to True.
signatures: Only applies to SavedModel format. Signatures to save
with the SavedModel. See the `signatures` argument in
`tf.saved_model.save` for details.
options: Only applies to SavedModel format.
`tf.saved_model.SaveOptions` object that specifies SavedModel
saving options.
save_traces: Only applies to SavedModel format. When enabled, the
SavedModel will store the function traces for each layer. This
can be disabled, so that only the configs of each layer are stored.
Defaults to `True`. Disabling this will decrease serialization time
and reduce file size, but it requires that all custom layers/models
implement a `get_config()` method.
Example:
```python
model = tf.keras.Sequential([
tf.keras.layers.Dense(5, input_shape=(3,)),
tf.keras.layers.Softmax()])
model.save("model.keras")
loaded_model = tf.keras.models.load_model("model.keras")
x = tf.random.uniform((10, 3))
assert np.allclose(model.predict(x), loaded_model.predict(x))
```
Note that `model.save()` is an alias for `tf.keras.models.save_model()`.
The SavedModel or HDF5 file contains:
- The model's configuration (architecture)
- The model's weights
- The model's optimizer's state (if any)
Thus models can be reinstantiated in the exact same state, without any of
the code used for model definition or training.
Note that the model weights may have different scoped names after being
loaded. Scoped names include the model/layer names, such as
`"dense_1/kernel:0"`. It is recommended that you use the layer properties to
access specific variables, e.g. `model.get_layer("dense_1").kernel`.
__SavedModel serialization format__
With `save_format="tf"`, the model and all trackable objects attached
to it (e.g. layers and variables) are saved as a TensorFlow SavedModel.
The model config, weights, and optimizer are included in the SavedModel.
Additionally, for every Keras layer attached to the model, the SavedModel
stores:
* The config and metadata -- e.g. name, dtype, trainable status
* Traced call and loss functions, which are stored as TensorFlow
subgraphs.
The traced functions allow the SavedModel format to save and load custom
layers without the original class definition.
You can choose to not save the traced functions by disabling the
`save_traces` option. This will decrease the time it takes to save the model
and the amount of disk space occupied by the output SavedModel. If you
enable this option, then you _must_ provide all custom class definitions
when loading the model. See the `custom_objects` argument in
`tf.keras.models.load_model`.
| 472 | 110 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def save_model(model, filepath, overwrite=True, save_format=None, **kwargs):
save_format = get_save_format(filepath, save_format)
if save_format not in ("keras", "tf", "h5", "keras_v3"):
raise ValueError(
"Unknown `save_format` argument. Expected one of "
"'keras', 'tf', or 'h5'. "
f"Received: save_format{save_format}"
)
if save_format == "keras_v3" or (
saving_lib.saving_v3_enabled() and save_format == "keras"
):
# If file exists and should not be overwritten.
try:
exists = os.path.exists(filepath)
except TypeError:
exists = False
if exists and not overwrite:
proceed = io_utils.ask_to_proceed_with_overwrite(filepath)
if not proceed:
return
if kwargs:
raise ValueError(
"The following argument(s) are not supported "
f"with the native Keras format: {list(kwargs.keys())}"
)
saving_lib.save_model(model, filepath)
else:
# Legacy case
return legacy_sm_saving_lib.save_model(
model,
filepath,
overwrite=overwrite,
save_format=save_format,
**kwargs,
)
@keras_export("keras.models.load_model")
```
###Assistant : Saves a model as a TensorFlow SavedModel or HDF5 file.
See the [Serialization and Saving guide](
https://keras.io/guides/serialization_and_saving/) for details.
Args:
model: Keras model instance to be saved.
filepath: `str` or `pathlib.Path` object. Path where to save the model.
overwrite: Whether we should overwrite any existing model at the target
location, or instead ask the user via an interactive prompt.
save_format: Either `"keras"`, `"tf"`, `"h5"`,
indicating whether to save the model
in the native Keras format (`.keras`),
in the TensorFlow SavedModel format (referred to as "SavedModel"
below), or in the legacy HDF5 format (`.h5`).
Defaults to `"tf"` in TF 2.X, and `"h5"` in TF 1.X.
SavedModel format arguments:
include_optimizer: Only applied to SavedModel and legacy HDF5 formats.
If False, do not save the optimizer state. Defaults to True.
signatures: Only applies to SavedModel format. Signatures to save
with the SavedModel. See the `signatures` argument in
`tf.saved_model.save` for details.
options: Only applies to SavedModel format.
`tf.saved_model.SaveOptions` object that specifies SavedModel
saving options.
save_traces: Only applies to SavedModel format. When enabled, the
SavedModel will store the function traces for each layer. This
can be disabled, so that only the configs of each layer are stored.
Defaults to `True`. Disabling this will decrease serialization time
and reduce file size, but it requires that all custom layers/models
implement a `get_config()` method.
Example:
```python
model = tf.keras.Sequential([
tf.keras.layers.Dense(5, input_shape=(3,)),
tf.keras.layers.Softmax()])
model.save("model.keras")
loaded_model = tf.keras.models.load_model("model.keras")
x = tf.random.uniform((10, 3))
assert np.allclose(model.predict(x), loaded_model.predict(x))
```
Note that `model.save()` is an alias for `tf.keras.models.save_model()`.
The SavedModel or HDF5 file contains:
- The model's configuration (architecture)
- The model's weights
- The model's optimizer's state (if any)
Thus models can be reinstantiated in the exact same state, without any of
the code used for model definition or training.
Note that the model weights may have different scoped names after being
loaded. Scoped names include the model/layer names, such as
`"dense_1/kernel:0"`. It is recommended that you use the layer properties to
access specific variables, e.g. `model.get_layer("dense_1").kernel`.
__SavedModel serialization format__
With `save_format="tf"`, the model and all trackable objects attached
to it (e.g. layers and variables) are saved as a TensorFlow SavedModel.
The model config, weights, and optimizer are included in the SavedModel.
Additionally, for every Keras layer attached to the model, the SavedModel
stores:
* The config and metadata -- e.g. name, dtype, trainable status
* Traced call and loss functions, which are stored as TensorFlow
subgraphs.
The traced functions allow the SavedModel format to save and load custom
layers without the original class definition.
You can choose to not save the traced functions by disabling the
`save_traces` option. This will decrease the time it takes to save the model
and the amount of disk space occupied by the output SavedModel. If you
enable this option, then you _must_ provide all custom class definitions
when loading the model. See the `custom_objects` argument in
`tf.keras.models.load_model`.
|
1,950 | def test_random_spanning_tree_additive_small():
pytest.importorskip("numpy")
edges = {
(0, 1): 1,
(0, 2): 1,
(0, 5): 3,
(1, 2): 2,
(1, 4): 3,
(2, 3): 3,
(5, 3): 4,
(5, 4): 5,
(4, 3): 4,
}
# Build the graph
G = nx.Graph()
for u, v in edges:
G.add_edge(u, v, weight=edges[(u, v)])
solution_edges = [(0, 2), (1, 2), (2, 3), (3, 4), (3, 5)]
solution = nx.Graph()
solution.add_edges_from(solution_edges)
sampled_tree = nx.random_spanning_tree(
G, weight="weight", multiplicative=False, seed=37
)
assert nx.utils.edges_equal(solution.edges, sampled_tree.edges)
@pytest.mark.slow |
Sample a single spanning tree from the additive method.
| 9 | 78 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_random_spanning_tree_additive_small():
pytest.importorskip("numpy")
edges = {
(0, 1): 1,
(0, 2): 1,
(0, 5): 3,
(1, 2): 2,
(1, 4): 3,
(2, 3): 3,
(5, 3): 4,
(5, 4): 5,
(4, 3): 4,
}
# Build the graph
G = nx.Graph()
for u, v in edges:
G.add_edge(u, v, weight=edges[(u, v)])
solution_edges = [(0, 2), (1, 2), (2, 3), (3, 4), (3, 5)]
solution = nx.Graph()
solution.add_edges_from(solution_edges)
sampled_tree = nx.random_spanning_tree(
G, weight="weight", multiplicative=False, seed=37
)
assert nx.utils.edges_equal(solution.edges, sampled_tree.edges)
@pytest.mark.slow
```
###Assistant :
Sample a single spanning tree from the additive method.
|
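A brief usage sketch of `nx.random_spanning_tree` (available since NetworkX 2.6); the small weighted graph is invented, and `multiplicative=False` selects the additive weighting exercised by the test above.

```python
import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([(0, 1, 1), (0, 2, 1), (1, 2, 2), (1, 3, 3), (2, 3, 3)])

# multiplicative=True (the default) weights each tree by the product of its
# edge weights; multiplicative=False uses the sum instead.
tree = nx.random_spanning_tree(G, weight="weight", multiplicative=False, seed=42)
print(sorted(tree.edges()))
print(nx.is_tree(tree))  # True
```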
1,951 | def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400):
if gamma > 1:
raise ValueError(f"Gamma should be < 1, got {gamma}.")
# exlude invalid pixels and extremely large diplacements
flow_norm = torch.sum(flow_gt**2, dim=1).sqrt()
valid_flow_mask = valid_flow_mask & (flow_norm < max_flow)
valid_flow_mask = valid_flow_mask[:, None, :, :]
flow_preds = torch.stack(flow_preds) # shape = (num_flow_updates, batch_size, 2, H, W)
abs_diff = (flow_preds - flow_gt).abs()
abs_diff = (abs_diff * valid_flow_mask).mean(axis=(1, 2, 3, 4))
num_predictions = flow_preds.shape[0]
weights = gamma ** torch.arange(num_predictions - 1, -1, -1).to(flow_gt.device)
flow_loss = (abs_diff * weights).sum()
return flow_loss
| Loss function defined over sequence of flow predictions | 8 | 86 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400):
if gamma > 1:
raise ValueError(f"Gamma should be < 1, got {gamma}.")
    # exclude invalid pixels and extremely large displacements
flow_norm = torch.sum(flow_gt**2, dim=1).sqrt()
valid_flow_mask = valid_flow_mask & (flow_norm < max_flow)
valid_flow_mask = valid_flow_mask[:, None, :, :]
flow_preds = torch.stack(flow_preds) # shape = (num_flow_updates, batch_size, 2, H, W)
abs_diff = (flow_preds - flow_gt).abs()
abs_diff = (abs_diff * valid_flow_mask).mean(axis=(1, 2, 3, 4))
num_predictions = flow_preds.shape[0]
weights = gamma ** torch.arange(num_predictions - 1, -1, -1).to(flow_gt.device)
flow_loss = (abs_diff * weights).sum()
return flow_loss
```
###Assistant : Loss function defined over sequence of flow predictions
|
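The exponentially decaying weights used above can be checked in isolation; a tiny sketch with illustrative values for `gamma` and the number of predictions:

```python
import torch

num_predictions, gamma = 4, 0.8
# Later flow refinements get exponentially larger weights: gamma ** (N - 1 - i).
weights = gamma ** torch.arange(num_predictions - 1, -1, -1, dtype=torch.float32)
print(weights)             # tensor([0.5120, 0.6400, 0.8000, 1.0000])
print(weights[-1].item())  # the final prediction is weighted 1.0
```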
1,952 | def booleans_processing(config, **kwargs):
final_booleans = {}
if tf.executing_eagerly():
final_booleans["output_attentions"] = (
kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions
)
final_booleans["output_hidden_states"] = (
kwargs["output_hidden_states"]
if kwargs["output_hidden_states"] is not None
else config.output_hidden_states
)
final_booleans["return_dict"] = (
kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict
)
if "use_cache" in kwargs:
final_booleans["use_cache"] = (
kwargs["use_cache"] if kwargs["use_cache"] is not None else getattr(config, "use_cache", None)
)
else:
final_booleans["output_attentions"] = config.output_attentions
final_booleans["output_hidden_states"] = config.output_hidden_states
if kwargs.get("return_dict", None) not in (None, True):
tf_logger.warning(
"The parameter `return_dict` cannot be set in graph mode and will always be set to `True`."
)
final_booleans["return_dict"] = True
if "use_cache" in kwargs:
final_booleans["use_cache"] = getattr(config, "use_cache", None)
return final_booleans
|
Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or
graph)
Args:
config ([`PretrainedConfig`]):
The config of the running model.
**kwargs:
The boolean parameters
Returns:
A dictionary with the proper values for each boolean
| 45 | 108 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def booleans_processing(config, **kwargs):
final_booleans = {}
if tf.executing_eagerly():
final_booleans["output_attentions"] = (
kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions
)
final_booleans["output_hidden_states"] = (
kwargs["output_hidden_states"]
if kwargs["output_hidden_states"] is not None
else config.output_hidden_states
)
final_booleans["return_dict"] = (
kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict
)
if "use_cache" in kwargs:
final_booleans["use_cache"] = (
kwargs["use_cache"] if kwargs["use_cache"] is not None else getattr(config, "use_cache", None)
)
else:
final_booleans["output_attentions"] = config.output_attentions
final_booleans["output_hidden_states"] = config.output_hidden_states
if kwargs.get("return_dict", None) not in (None, True):
tf_logger.warning(
"The parameter `return_dict` cannot be set in graph mode and will always be set to `True`."
)
final_booleans["return_dict"] = True
if "use_cache" in kwargs:
final_booleans["use_cache"] = getattr(config, "use_cache", None)
return final_booleans
```
###Assistant :
Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or
graph)
Args:
config ([`PretrainedConfig`]):
The config of the running model.
**kwargs:
The boolean parameters
Returns:
A dictionary with the proper values for each boolean
|
1,953 | def test_dagrun_root_fail_unfinished(self):
# TODO: this should live in test_dagrun.py
# Run both the failed and successful tasks
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=None,
)
self.null_exec.mock_task_fail(dag_id, 'test_dagrun_fail', dr.run_id)
with pytest.raises(AirflowException):
dag.run(start_date=dr.execution_date, end_date=dr.execution_date, executor=self.null_exec)
# Mark the successful task as never having run since we want to see if the
# dagrun will be in a running state despite having an unfinished task.
with create_session() as session:
ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
ti.state = State.NONE
session.commit()
dr.update_state()
assert dr.state == State.RUNNING
|
DagRuns with one unfinished and one failed root task -> RUNNING
| 11 | 84 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_dagrun_root_fail_unfinished(self):
# TODO: this should live in test_dagrun.py
# Run both the failed and successful tasks
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=None,
)
self.null_exec.mock_task_fail(dag_id, 'test_dagrun_fail', dr.run_id)
with pytest.raises(AirflowException):
dag.run(start_date=dr.execution_date, end_date=dr.execution_date, executor=self.null_exec)
# Mark the successful task as never having run since we want to see if the
# dagrun will be in a running state despite having an unfinished task.
with create_session() as session:
ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
ti.state = State.NONE
session.commit()
dr.update_state()
assert dr.state == State.RUNNING
```
###Assistant :
DagRuns with one unfinished and one failed root task -> RUNNING
|
1,954 | def _get_call_args(backend_name, data, args, kwargs):
if isinstance(data, ABCSeries):
arg_def = [
("kind", "line"),
("ax", None),
("figsize", None),
("use_index", True),
("title", None),
("grid", None),
("legend", False),
("style", None),
("logx", False),
("logy", False),
("loglog", False),
("xticks", None),
("yticks", None),
("xlim", None),
("ylim", None),
("rot", None),
("fontsize", None),
("colormap", None),
("table", False),
("yerr", None),
("xerr", None),
("label", None),
("secondary_y", False),
("xlabel", None),
("ylabel", None),
]
elif isinstance(data, ABCDataFrame):
arg_def = [
("x", None),
("y", None),
("kind", "line"),
("ax", None),
("subplots", False),
("sharex", None),
("sharey", False),
("layout", None),
("figsize", None),
("use_index", True),
("title", None),
("grid", None),
("legend", True),
("style", None),
("logx", False),
("logy", False),
("loglog", False),
("xticks", None),
("yticks", None),
("xlim", None),
("ylim", None),
("rot", None),
("fontsize", None),
("colormap", None),
("table", False),
("yerr", None),
("xerr", None),
("secondary_y", False),
("sort_columns", False),
("xlabel", None),
("ylabel", None),
]
else:
raise TypeError(
f"Called plot accessor for type {type(data).__name__}, "
"expected Series or DataFrame"
)
if "sort_columns" in itertools.chain(args, kwargs.keys()):
warnings.warn(
"`sort_columns` is deprecated and will be removed in a future "
"version.",
FutureWarning,
stacklevel=find_stack_level(inspect.currentframe()),
)
if args and isinstance(data, ABCSeries):
positional_args = str(args)[1:-1]
keyword_args = ", ".join(
[f"{name}={repr(value)}" for (name, _), value in zip(arg_def, args)]
)
msg = (
"`Series.plot()` should not be called with positional "
"arguments, only keyword arguments. The order of "
"positional arguments will change in the future. "
f"Use `Series.plot({keyword_args})` instead of "
f"`Series.plot({positional_args})`."
)
raise TypeError(msg)
pos_args = {name: value for (name, _), value in zip(arg_def, args)}
if backend_name == "pandas.plotting._matplotlib":
kwargs = dict(arg_def, **pos_args, **kwargs)
else:
kwargs = dict(pos_args, **kwargs)
x = kwargs.pop("x", None)
y = kwargs.pop("y", None)
kind = kwargs.pop("kind", "line")
return x, y, kind, kwargs
|
This function makes calls to this accessor `__call__` method compatible
with the previous `SeriesPlotMethods.__call__` and
`DataFramePlotMethods.__call__`. Those had slightly different
signatures, since `DataFramePlotMethods` accepted `x` and `y`
parameters.
| 28 | 266 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _get_call_args(backend_name, data, args, kwargs):
if isinstance(data, ABCSeries):
arg_def = [
("kind", "line"),
("ax", None),
("figsize", None),
("use_index", True),
("title", None),
("grid", None),
("legend", False),
("style", None),
("logx", False),
("logy", False),
("loglog", False),
("xticks", None),
("yticks", None),
("xlim", None),
("ylim", None),
("rot", None),
("fontsize", None),
("colormap", None),
("table", False),
("yerr", None),
("xerr", None),
("label", None),
("secondary_y", False),
("xlabel", None),
("ylabel", None),
]
elif isinstance(data, ABCDataFrame):
arg_def = [
("x", None),
("y", None),
("kind", "line"),
("ax", None),
("subplots", False),
("sharex", None),
("sharey", False),
("layout", None),
("figsize", None),
("use_index", True),
("title", None),
("grid", None),
("legend", True),
("style", None),
("logx", False),
("logy", False),
("loglog", False),
("xticks", None),
("yticks", None),
("xlim", None),
("ylim", None),
("rot", None),
("fontsize", None),
("colormap", None),
("table", False),
("yerr", None),
("xerr", None),
("secondary_y", False),
("sort_columns", False),
("xlabel", None),
("ylabel", None),
]
else:
raise TypeError(
f"Called plot accessor for type {type(data).__name__}, "
"expected Series or DataFrame"
)
if "sort_columns" in itertools.chain(args, kwargs.keys()):
warnings.warn(
"`sort_columns` is deprecated and will be removed in a future "
"version.",
FutureWarning,
stacklevel=find_stack_level(inspect.currentframe()),
)
if args and isinstance(data, ABCSeries):
positional_args = str(args)[1:-1]
keyword_args = ", ".join(
[f"{name}={repr(value)}" for (name, _), value in zip(arg_def, args)]
)
msg = (
"`Series.plot()` should not be called with positional "
"arguments, only keyword arguments. The order of "
"positional arguments will change in the future. "
f"Use `Series.plot({keyword_args})` instead of "
f"`Series.plot({positional_args})`."
)
raise TypeError(msg)
pos_args = {name: value for (name, _), value in zip(arg_def, args)}
if backend_name == "pandas.plotting._matplotlib":
kwargs = dict(arg_def, **pos_args, **kwargs)
else:
kwargs = dict(pos_args, **kwargs)
x = kwargs.pop("x", None)
y = kwargs.pop("y", None)
kind = kwargs.pop("kind", "line")
return x, y, kind, kwargs
```
###Assistant :
This function makes calls to this accessor `__call__` method compatible
with the previous `SeriesPlotMethods.__call__` and
`DataFramePlotMethods.__call__`. Those had slightly different
signatures, since `DataFramePlotMethods` accepted `x` and `y`
parameters.
|
1,955 | async def test_ahas_key(self):
await cache.aset("hello1", "goodbye1")
self.assertIs(await cache.ahas_key("hello1"), False)
self.assertIs(await cache.ahas_key("goodbye1"), False)
| ahas_key() doesn't ever return True for the dummy cache backend. | 10 | 12 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
async def test_ahas_key(self):
await cache.aset("hello1", "goodbye1")
self.assertIs(await cache.ahas_key("hello1"), False)
self.assertIs(await cache.ahas_key("goodbye1"), False)
```
###Assistant : ahas_key() doesn't ever return True for the dummy cache backend.
|
1,956 | def min_temp(self) -> float:
if self.temperature_unit == UnitOfTemperature.CELSIUS:
return TEMP_MIN
return TEMP_MIN_F
| Return the minimum temperature supported by the device. | 8 | 12 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def min_temp(self) -> float:
if self.temperature_unit == UnitOfTemperature.CELSIUS:
return TEMP_MIN
return TEMP_MIN_F
```
###Assistant : Return the minimum temperature supported by the device.
|
1,957 | def nseries(self, x=None, x0=0, n=6, dir='+', logx=None, cdir=0):
if x and x not in self.free_symbols:
return self
if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None):
return self.series(x, x0, n, dir, cdir=cdir)
else:
return self._eval_nseries(x, n=n, logx=logx, cdir=cdir)
|
Wrapper to _eval_nseries if assumptions allow, else to series.
If x is given, x0 is 0, dir='+', and self has x, then _eval_nseries is
called. This calculates "n" terms in the innermost expressions and
then builds up the final series just by "cross-multiplying" everything
out.
The optional ``logx`` parameter can be used to replace any log(x) in the
returned series with a symbolic value to avoid evaluating log(x) at 0. A
symbol to use in place of log(x) should be provided.
Advantage -- it's fast, because we do not have to determine how many
terms we need to calculate in advance.
Disadvantage -- you may end up with fewer terms than you may have
expected, but the O(x**n) term appended will always be correct and
so the result, though perhaps shorter, will also be correct.
If any of those assumptions is not met, this is treated like a
wrapper to series which will try harder to return the correct
number of terms.
See also lseries().
Examples
========
>>> from sympy import sin, log, Symbol
>>> from sympy.abc import x, y
>>> sin(x).nseries(x, 0, 6)
x - x**3/6 + x**5/120 + O(x**6)
>>> log(x+1).nseries(x, 0, 5)
x - x**2/2 + x**3/3 - x**4/4 + O(x**5)
Handling of the ``logx`` parameter --- in the following example the
expansion fails since ``sin`` does not have an asymptotic expansion
at -oo (the limit of log(x) as x approaches 0):
>>> e = sin(log(x))
>>> e.nseries(x, 0, 6)
Traceback (most recent call last):
...
PoleError: ...
...
>>> logx = Symbol('logx')
>>> e.nseries(x, 0, 6, logx=logx)
sin(logx)
In the following example, the expansion works but only returns self
unless the ``logx`` parameter is used:
>>> e = x**y
>>> e.nseries(x, 0, 2)
x**y
>>> e.nseries(x, 0, 2, logx=logx)
exp(logx*y)
| 294 | 49 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def nseries(self, x=None, x0=0, n=6, dir='+', logx=None, cdir=0):
if x and x not in self.free_symbols:
return self
if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None):
return self.series(x, x0, n, dir, cdir=cdir)
else:
return self._eval_nseries(x, n=n, logx=logx, cdir=cdir)
```
###Assistant :
Wrapper to _eval_nseries if assumptions allow, else to series.
If x is given, x0 is 0, dir='+', and self has x, then _eval_nseries is
called. This calculates "n" terms in the innermost expressions and
then builds up the final series just by "cross-multiplying" everything
out.
The optional ``logx`` parameter can be used to replace any log(x) in the
returned series with a symbolic value to avoid evaluating log(x) at 0. A
symbol to use in place of log(x) should be provided.
Advantage -- it's fast, because we do not have to determine how many
terms we need to calculate in advance.
Disadvantage -- you may end up with fewer terms than you may have
expected, but the O(x**n) term appended will always be correct and
so the result, though perhaps shorter, will also be correct.
If any of those assumptions is not met, this is treated like a
wrapper to series which will try harder to return the correct
number of terms.
See also lseries().
Examples
========
>>> from sympy import sin, log, Symbol
>>> from sympy.abc import x, y
>>> sin(x).nseries(x, 0, 6)
x - x**3/6 + x**5/120 + O(x**6)
>>> log(x+1).nseries(x, 0, 5)
x - x**2/2 + x**3/3 - x**4/4 + O(x**5)
Handling of the ``logx`` parameter --- in the following example the
expansion fails since ``sin`` does not have an asymptotic expansion
at -oo (the limit of log(x) as x approaches 0):
>>> e = sin(log(x))
>>> e.nseries(x, 0, 6)
Traceback (most recent call last):
...
PoleError: ...
...
>>> logx = Symbol('logx')
>>> e.nseries(x, 0, 6, logx=logx)
sin(logx)
In the following example, the expansion works but only returns self
unless the ``logx`` parameter is used:
>>> e = x**y
>>> e.nseries(x, 0, 2)
x**y
>>> e.nseries(x, 0, 2, logx=logx)
exp(logx*y)
|
1,958 | def test_getitem_error(self, exception):
container = self.Container(exception("failure"))
with pytest.raises(validate.ValidationError) as cm:
validate.validate(validate.get("foo", default="default"), container)
assert_validationerror(cm.value, )
|
ValidationError(GetItemSchema):
Could not get key 'foo' from object Container
Context:
failure
| 11 | 15 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_getitem_error(self, exception):
container = self.Container(exception("failure"))
with pytest.raises(validate.ValidationError) as cm:
validate.validate(validate.get("foo", default="default"), container)
assert_validationerror(cm.value, )
```
###Assistant :
ValidationError(GetItemSchema):
Could not get key 'foo' from object Container
Context:
failure
|
1,959 | def num_base_priors(self) -> List[int]:
return [1 for _ in range(len(self.strides))]
| list[int]: The number of priors (points) at a point
on the feature grid | 13 | 10 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def num_base_priors(self) -> List[int]:
return [1 for _ in range(len(self.strides))]
```
###Assistant : list[int]: The number of priors (points) at a point
on the feature grid
|
1,960 | def test_logentry_change_message_localized_datetime_input(self):
post_data = {
"site": self.site.pk,
"title": "Changed",
"hist": "Some content",
"created_0": "12/03/2008",
"created_1": "11:54",
}
with translation.override("fr"):
change_url = reverse(
"admin:admin_utils_article_change", args=[quote(self.a1.pk)]
)
response = self.client.post(change_url, post_data)
self.assertRedirects(
response, reverse("admin:admin_utils_article_changelist")
)
logentry = LogEntry.objects.filter(
content_type__model__iexact="article"
).latest("id")
self.assertEqual(logentry.get_change_message(), "Changed Title and History.")
|
Localized date/time inputs shouldn't affect changed form data detection.
| 9 | 43 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_logentry_change_message_localized_datetime_input(self):
post_data = {
"site": self.site.pk,
"title": "Changed",
"hist": "Some content",
"created_0": "12/03/2008",
"created_1": "11:54",
}
with translation.override("fr"):
change_url = reverse(
"admin:admin_utils_article_change", args=[quote(self.a1.pk)]
)
response = self.client.post(change_url, post_data)
self.assertRedirects(
response, reverse("admin:admin_utils_article_changelist")
)
logentry = LogEntry.objects.filter(
content_type__model__iexact="article"
).latest("id")
self.assertEqual(logentry.get_change_message(), "Changed Title and History.")
```
###Assistant :
Localized date/time inputs shouldn't affect changed form data detection.
|
1,961 | def _cleanup_code(code):
return code # Nothing to do here
# language=PythonVerboseRegExp
_call_function_bytecode = bytecode_regex(
rb
)
else:
# Starting with python 3.11, the bytecode is peppered with CACHE instructions (which dis module conveniently hides
# unless show_caches=True is used). Dealing with these CACHE instructions in regex rules is going to render them
# unreadable, so instead we pre-process the bytecode and filter the offending opcodes out. |
# Matches `global_function('some', 'constant', 'arguments')`.
# Load the global function. In code with >256 of names, this may require extended name references.
((?:`EXTENDED_ARG`.)*
(?:`LOAD_NAME`|`LOAD_GLOBAL`|`LOAD_FAST`).)
# For foo.bar.whizz(), the above is the 'foo', below is the 'bar.whizz'.
((?:(?:`EXTENDED_ARG`.)*
(?:`LOAD_METHOD`|`LOAD_ATTR`).)*)
# Load however many arguments it takes. These (for now) must all be constants.
# Again, code with >256 constants may need extended enumeration.
((?:(?:`EXTENDED_ARG`.)*
`LOAD_CONST`.)*)
# Call the function. The parameter is the argument count (which may also be >256) if CALL_FUNCTION or
# CALL_METHOD are used. For CALL_FUNCTION_EX, the parameter are flags.
((?:`EXTENDED_ARG`.)*
(?:`CALL_FUNCTION`|`CALL_METHOD`|`CALL_FUNCTION_EX`).)
| 94 | 66 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _cleanup_code(code):
return code # Nothing to do here
# language=PythonVerboseRegExp
_call_function_bytecode = bytecode_regex(
rb
)
else:
# Starting with python 3.11, the bytecode is peppered with CACHE instructions (which dis module conveniently hides
# unless show_caches=True is used). Dealing with these CACHE instructions in regex rules is going to render them
# unreadable, so instead we pre-process the bytecode and filter the offending opcodes out.
```
###Assistant :
# Matches `global_function('some', 'constant', 'arguments')`.
# Load the global function. In code with >256 of names, this may require extended name references.
((?:`EXTENDED_ARG`.)*
(?:`LOAD_NAME`|`LOAD_GLOBAL`|`LOAD_FAST`).)
# For foo.bar.whizz(), the above is the 'foo', below is the 'bar.whizz'.
((?:(?:`EXTENDED_ARG`.)*
(?:`LOAD_METHOD`|`LOAD_ATTR`).)*)
# Load however many arguments it takes. These (for now) must all be constants.
# Again, code with >256 constants may need extended enumeration.
((?:(?:`EXTENDED_ARG`.)*
`LOAD_CONST`.)*)
# Call the function. The parameter is the argument count (which may also be >256) if CALL_FUNCTION or
# CALL_METHOD are used. For CALL_FUNCTION_EX, the parameter are flags.
((?:`EXTENDED_ARG`.)*
(?:`CALL_FUNCTION`|`CALL_METHOD`|`CALL_FUNCTION_EX`).)
|
1,962 | def filemode(mode):
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
| Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
| 14 | 26 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def filemode(mode):
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
```
###Assistant : Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
|
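The helper above mirrors what the standard library has exposed as `stat.filemode` since Python 3.3; a quick check of the expected output format:

```python
import stat

print(stat.filemode(0o100644))  # '-rw-r--r--'  (regular file, mode 644)
print(stat.filemode(0o040755))  # 'drwxr-xr-x'  (directory, mode 755)
```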
1,963 | def filter_2d(x, k, gain=1, data_format='NCHW', impl='cuda'):
r
k = _setup_kernel(k) * gain
p = k.shape[0] - 1
return _simple_upfirdn_2d(x, k, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl)
#----------------------------------------------------------------------------
| Filter a batch of 2D images with the given FIR filter.
Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
and filters each image with the given filter. The filter is normalized so that
if the input pixels are constant, they will be scaled by the specified `gain`.
Pixels outside the image are assumed to be zero.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
gain: Scaling factor for signal magnitude (default: 1.0).
data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
Returns:
Tensor of the same shape and datatype as `x`.
| 130 | 25 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def filter_2d(x, k, gain=1, data_format='NCHW', impl='cuda'):
r
k = _setup_kernel(k) * gain
p = k.shape[0] - 1
return _simple_upfirdn_2d(x, k, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl)
#----------------------------------------------------------------------------
```
###Assistant : Filter a batch of 2D images with the given FIR filter.
Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
and filters each image with the given filter. The filter is normalized so that
if the input pixels are constant, they will be scaled by the specified `gain`.
Pixels outside the image are assumed to be zero.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
gain: Scaling factor for signal magnitude (default: 1.0).
data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
Returns:
Tensor of the same shape and datatype as `x`.
|
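The normalization described in the docstring — constant inputs end up scaled by `gain` — can be sketched with NumPy; `setup_kernel` below only approximates the library's `_setup_kernel` (outer product for separable kernels, then normalize to unit sum).

```python
import numpy as np


def setup_kernel(k, gain=1.0):
    # Make the (possibly separable) FIR kernel 2D and normalize it so that a
    # constant input image is scaled by exactly `gain` after filtering.
    k = np.asarray(k, dtype=np.float64)
    if k.ndim == 1:
        k = np.outer(k, k)
    return gain * k / k.sum()


k = setup_kernel([1, 3, 3, 1], gain=2.0)
print(k.shape)            # (4, 4)
print(round(k.sum(), 6))  # 2.0 -> constant inputs are scaled by the gain
```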
1,964 | def execute():
frappe.reload_doctype("Pricing Rule")
currency = frappe.db.get_default("currency")
for doc in frappe.get_all("Pricing Rule", fields=["company", "name"]):
if doc.company:
currency = frappe.get_cached_value("Company", doc.company, "default_currency")
frappe.db.sql(
, (currency, doc.name)
)
| update `tabPricing Rule` set currency = %s where name = %s | 11 | 26 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def execute():
frappe.reload_doctype("Pricing Rule")
currency = frappe.db.get_default("currency")
for doc in frappe.get_all("Pricing Rule", fields=["company", "name"]):
if doc.company:
currency = frappe.get_cached_value("Company", doc.company, "default_currency")
frappe.db.sql(
, (currency, doc.name)
)
```
###Assistant : update `tabPricing Rule` set currency = %s where name = %s
|
1,965 | def forward(self, x, mask=None):
B_, N, C = x.shape
qkv = self.qkv(x).reshape((B_, N, 3, self.num_heads, C // self.num_heads)).transpose((2, 0, 3, 1, 4))
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose((0, 1, 3, 2)))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.reshape(
(-1, ))].reshape((self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1],
-1)) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.transpose((2, 0, 1)) # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.reshape((B_ // nW, nW, self.num_heads, N, N)) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.reshape((-1, self.num_heads, N, N))
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose((0, 2, 1, 3)).reshape((B_, N, C))
x = self.proj(x)
x = self.proj_drop(x)
return x
|
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
| 21 | 131 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def forward(self, x, mask=None):
B_, N, C = x.shape
qkv = self.qkv(x).reshape((B_, N, 3, self.num_heads, C // self.num_heads)).transpose((2, 0, 3, 1, 4))
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose((0, 1, 3, 2)))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.reshape(
(-1, ))].reshape((self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1],
-1)) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.transpose((2, 0, 1)) # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.reshape((B_ // nW, nW, self.num_heads, N, N)) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.reshape((-1, self.num_heads, N, N))
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose((0, 2, 1, 3)).reshape((B_, N, C))
x = self.proj(x)
x = self.proj_drop(x)
return x
```
###Assistant :
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
|
1,966 | def test_join_rules_invite(self):
creator = "@creator:example.com"
pleb = "@joiner:example.com"
auth_events = {
("m.room.create", ""): _create_event(RoomVersions.V6, creator),
("m.room.member", creator): _join_event(RoomVersions.V6, creator),
("m.room.join_rules", ""): _join_rules_event(
RoomVersions.V6, creator, "invite"
),
}
# A join without an invite is rejected.
with self.assertRaises(AuthError):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(RoomVersions.V6, pleb),
auth_events.values(),
)
# A user cannot be force-joined to a room.
with self.assertRaises(AuthError):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_member_event(RoomVersions.V6, pleb, "join", sender=creator),
auth_events.values(),
)
# Banned should be rejected.
auth_events[("m.room.member", pleb)] = _member_event(
RoomVersions.V6, pleb, "ban"
)
with self.assertRaises(AuthError):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(RoomVersions.V6, pleb),
auth_events.values(),
)
# A user who left cannot re-join.
auth_events[("m.room.member", pleb)] = _member_event(
RoomVersions.V6, pleb, "leave"
)
with self.assertRaises(AuthError):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(RoomVersions.V6, pleb),
auth_events.values(),
)
# A user can send a join if they're in the room.
auth_events[("m.room.member", pleb)] = _member_event(
RoomVersions.V6, pleb, "join"
)
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(RoomVersions.V6, pleb),
auth_events.values(),
)
# A user can accept an invite.
auth_events[("m.room.member", pleb)] = _member_event(
RoomVersions.V6, pleb, "invite", sender=creator
)
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(RoomVersions.V6, pleb),
auth_events.values(),
)
|
Test joining an invite only room.
| 6 | 154 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_join_rules_invite(self):
creator = "@creator:example.com"
pleb = "@joiner:example.com"
auth_events = {
("m.room.create", ""): _create_event(RoomVersions.V6, creator),
("m.room.member", creator): _join_event(RoomVersions.V6, creator),
("m.room.join_rules", ""): _join_rules_event(
RoomVersions.V6, creator, "invite"
),
}
# A join without an invite is rejected.
with self.assertRaises(AuthError):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(RoomVersions.V6, pleb),
auth_events.values(),
)
# A user cannot be force-joined to a room.
with self.assertRaises(AuthError):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_member_event(RoomVersions.V6, pleb, "join", sender=creator),
auth_events.values(),
)
# Banned should be rejected.
auth_events[("m.room.member", pleb)] = _member_event(
RoomVersions.V6, pleb, "ban"
)
with self.assertRaises(AuthError):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(RoomVersions.V6, pleb),
auth_events.values(),
)
# A user who left cannot re-join.
auth_events[("m.room.member", pleb)] = _member_event(
RoomVersions.V6, pleb, "leave"
)
with self.assertRaises(AuthError):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(RoomVersions.V6, pleb),
auth_events.values(),
)
# A user can send a join if they're in the room.
auth_events[("m.room.member", pleb)] = _member_event(
RoomVersions.V6, pleb, "join"
)
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(RoomVersions.V6, pleb),
auth_events.values(),
)
# A user can accept an invite.
auth_events[("m.room.member", pleb)] = _member_event(
RoomVersions.V6, pleb, "invite", sender=creator
)
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(RoomVersions.V6, pleb),
auth_events.values(),
)
```
###Assistant :
Test joining an invite only room.
|
1,967 | def validate_onboarding(data):
logging.info(f"Validating onboarding data {data}")
messages = data['outputs']['messages']
if len(messages) == 0:
return False
status_message = messages[-2]
if status_message is None:
return False
submitted_data = status_message.get('data')
if submitted_data is None:
return False
final_status = submitted_data.get('final_status')
return final_status == ONBOARD_SUCCESS
# TODO: find a better way to avoid duplicating this from model_chat world.py |
Check the contents of the data to ensure they are valid.
| 11 | 53 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def validate_onboarding(data):
logging.info(f"Validating onboarding data {data}")
messages = data['outputs']['messages']
if len(messages) == 0:
return False
status_message = messages[-2]
if status_message is None:
return False
submitted_data = status_message.get('data')
if submitted_data is None:
return False
final_status = submitted_data.get('final_status')
return final_status == ONBOARD_SUCCESS
# TODO: find a better way to avoid duplicating this from model_chat world.py
```
###Assistant :
Check the contents of the data to ensure they are valid.
|
1,968 | async def test_async_track_entity_registry_updated_event_with_empty_list(hass):
unsub_single = async_track_entity_registry_updated_event(
hass, [], ha.callback(lambda event: None)
)
unsub_single2 = async_track_entity_registry_updated_event(
hass, [], ha.callback(lambda event: None)
)
unsub_single2()
unsub_single()
| Test async_track_entity_registry_updated_event passing an empty list of entities. | 8 | 23 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
async def test_async_track_entity_registry_updated_event_with_empty_list(hass):
unsub_single = async_track_entity_registry_updated_event(
hass, [], ha.callback(lambda event: None)
)
unsub_single2 = async_track_entity_registry_updated_event(
hass, [], ha.callback(lambda event: None)
)
unsub_single2()
unsub_single()
```
###Assistant : Test async_track_entity_registry_updated_event passing an empty list of entities.
|
1,969 | def generate_self_signed_tls_certs():
try:
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
except ImportError:
raise ImportError(
"Using `Security.temporary` requires `cryptography`, please "
"install it using either pip or conda"
)
key = rsa.generate_private_key(
public_exponent=65537, key_size=2048, backend=default_backend()
)
key_contents = key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
).decode()
ray_interal = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "ray-internal")])
# This is the same logic used by the GCS server to acquire a
# private/internal IP address to listen on. If we just use localhost +
# 127.0.0.1 then we won't be able to connect to the GCS and will get
# an error like "No match found for server name: 192.168.X.Y"
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
private_ip_address = s.getsockname()[0]
s.close()
altnames = x509.SubjectAlternativeName(
[
x509.DNSName(
socket.gethostbyname(socket.gethostname())
), # Probably 127.0.0.1
x509.DNSName("127.0.0.1"),
x509.DNSName(private_ip_address), # 192.168.*.*
x509.DNSName("localhost"),
]
)
now = datetime.datetime.utcnow()
cert = (
x509.CertificateBuilder()
.subject_name(ray_interal)
.issuer_name(ray_interal)
.add_extension(altnames, critical=False)
.public_key(key.public_key())
.serial_number(x509.random_serial_number())
.not_valid_before(now)
.not_valid_after(now + datetime.timedelta(days=365))
.sign(key, hashes.SHA256(), default_backend())
)
cert_contents = cert.public_bytes(serialization.Encoding.PEM).decode()
return cert_contents, key_contents
| Create self-signed key/cert pair for testing.
This method requires the library ``cryptography`` be installed.
| 14 | 167 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def generate_self_signed_tls_certs():
try:
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
except ImportError:
raise ImportError(
"Using `Security.temporary` requires `cryptography`, please "
"install it using either pip or conda"
)
key = rsa.generate_private_key(
public_exponent=65537, key_size=2048, backend=default_backend()
)
key_contents = key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
).decode()
ray_interal = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "ray-internal")])
# This is the same logic used by the GCS server to acquire a
# private/internal IP address to listen on. If we just use localhost +
# 127.0.0.1 then we won't be able to connect to the GCS and will get
# an error like "No match found for server name: 192.168.X.Y"
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
private_ip_address = s.getsockname()[0]
s.close()
altnames = x509.SubjectAlternativeName(
[
x509.DNSName(
socket.gethostbyname(socket.gethostname())
), # Probably 127.0.0.1
x509.DNSName("127.0.0.1"),
x509.DNSName(private_ip_address), # 192.168.*.*
x509.DNSName("localhost"),
]
)
now = datetime.datetime.utcnow()
cert = (
x509.CertificateBuilder()
.subject_name(ray_interal)
.issuer_name(ray_interal)
.add_extension(altnames, critical=False)
.public_key(key.public_key())
.serial_number(x509.random_serial_number())
.not_valid_before(now)
.not_valid_after(now + datetime.timedelta(days=365))
.sign(key, hashes.SHA256(), default_backend())
)
cert_contents = cert.public_bytes(serialization.Encoding.PEM).decode()
return cert_contents, key_contents
```
###Assistant : Create self-signed key/cert pair for testing.
This method requires the library ``cryptography`` be installed.
|
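As a quick sanity check of the helper above, the returned PEM strings can be written to disk and loaded into a standard `ssl` context. This is a minimal sketch that assumes `generate_self_signed_tls_certs` from the snippet above is in scope; the file names are illustrative:
```python
import os
import ssl
import tempfile

cert_pem, key_pem = generate_self_signed_tls_certs()  # helper defined above

tmpdir = tempfile.mkdtemp()
cert_path = os.path.join(tmpdir, "server.crt")
key_path = os.path.join(tmpdir, "server.key")
with open(cert_path, "w") as f:
    f.write(cert_pem)
with open(key_path, "w") as f:
    f.write(key_pem)

# Loading the pair proves the PEM blobs are well formed (the key is unencrypted).
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain(certfile=cert_path, keyfile=key_path)
print("certificate and key loaded")
```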
1,970 | def print_help(self):
has_ticker_start = "[unvl]" if not self.ticker else ""
has_ticker_end = "[/unvl]" if not self.ticker else ""
help_text = f
console.print(text=help_text, menu="Stocks - Government")
| Print help
[src][QuiverQuant][/src]
[info]Explore:[/info][cmds]
lasttrades last trades
topbuys show most purchased stocks
topsells show most sold stocks
lastcontracts show last government contracts given out
qtrcontracts quarterly government contracts analysis
toplobbying top corporate lobbying tickers
load load a specific ticker for analysis[/cmds]
[param]Ticker: [/param]{self.ticker or None}{has_ticker_start}[cmds]
gtrades show government trades for ticker
contracts show government contracts for ticker
histcont show historical quarterly government contracts for ticker
lobbying corporate lobbying details for ticker[/cmds]{has_ticker_end}
| 71 | 25 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def print_help(self):
has_ticker_start = "[unvl]" if not self.ticker else ""
has_ticker_end = "[/unvl]" if not self.ticker else ""
help_text = f
console.print(text=help_text, menu="Stocks - Government")
```
###Assistant : Print help
[src][QuiverQuant][/src]
[info]Explore:[/info][cmds]
lasttrades last trades
topbuys show most purchased stocks
topsells show most sold stocks
lastcontracts show last government contracts given out
qtrcontracts quarterly government contracts analysis
toplobbying top corporate lobbying tickers
load load a specific ticker for analysis[/cmds]
[param]Ticker: [/param]{self.ticker or None}{has_ticker_start}[cmds]
gtrades show government trades for ticker
contracts show government contracts for ticker
histcont show historical quarterly government contracts for ticker
lobbying corporate lobbying details for ticker[/cmds]{has_ticker_end}
|
1,971 | def get_item_warehouse_projected_qty(items_to_consider):
item_warehouse_projected_qty = {}
for item_code, warehouse, projected_qty in frappe.db.sql(
.format(
", ".join(["%s"] * len(items_to_consider))
),
items_to_consider,
):
if item_code not in item_warehouse_projected_qty:
item_warehouse_projected_qty.setdefault(item_code, {})
if warehouse not in item_warehouse_projected_qty.get(item_code):
item_warehouse_projected_qty[item_code][warehouse] = flt(projected_qty)
warehouse_doc = frappe.get_doc("Warehouse", warehouse)
while warehouse_doc.parent_warehouse:
if not item_warehouse_projected_qty.get(item_code, {}).get(warehouse_doc.parent_warehouse):
item_warehouse_projected_qty.setdefault(item_code, {})[warehouse_doc.parent_warehouse] = flt(
projected_qty
)
else:
item_warehouse_projected_qty[item_code][warehouse_doc.parent_warehouse] += flt(projected_qty)
warehouse_doc = frappe.get_doc("Warehouse", warehouse_doc.parent_warehouse)
return item_warehouse_projected_qty
| select item_code, warehouse, projected_qty
from tabBin where item_code in ({0})
and (warehouse != "" and warehouse is not null) | 19 | 60 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_item_warehouse_projected_qty(items_to_consider):
item_warehouse_projected_qty = {}
for item_code, warehouse, projected_qty in frappe.db.sql(
.format(
", ".join(["%s"] * len(items_to_consider))
),
items_to_consider,
):
if item_code not in item_warehouse_projected_qty:
item_warehouse_projected_qty.setdefault(item_code, {})
if warehouse not in item_warehouse_projected_qty.get(item_code):
item_warehouse_projected_qty[item_code][warehouse] = flt(projected_qty)
warehouse_doc = frappe.get_doc("Warehouse", warehouse)
while warehouse_doc.parent_warehouse:
if not item_warehouse_projected_qty.get(item_code, {}).get(warehouse_doc.parent_warehouse):
item_warehouse_projected_qty.setdefault(item_code, {})[warehouse_doc.parent_warehouse] = flt(
projected_qty
)
else:
item_warehouse_projected_qty[item_code][warehouse_doc.parent_warehouse] += flt(projected_qty)
warehouse_doc = frappe.get_doc("Warehouse", warehouse_doc.parent_warehouse)
return item_warehouse_projected_qty
```
###Assistant : select item_code, warehouse, projected_qty
from tabBin where item_code in ({0})
and (warehouse != "" and warehouse is not null)
|
1,972 | def _collect_type_vars(types, typevar_types=None):
if typevar_types is None:
typevar_types = typing.TypeVar
tvars = []
for t in types:
if (
isinstance(t, typevar_types) and
t not in tvars and
not _is_unpack(t)
):
tvars.append(t)
if _should_collect_from_parameters(t):
tvars.extend([t for t in t.__parameters__ if t not in tvars])
return tuple(tvars)
NoReturn = typing.NoReturn
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = typing.TypeVar('T') # Any type.
KT = typing.TypeVar('KT') # Key type.
VT = typing.TypeVar('VT') # Value type.
T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
ClassVar = typing.ClassVar
# On older versions of typing there is an internal class named "Final".
# 3.8+
if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
Final = typing.Final
# 3.7
else: | Collect all type variable contained in types in order of
first appearance (lexicographic order). For example::
_collect_type_vars((T, List[S, T])) == (T, S)
| 22 | 132 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _collect_type_vars(types, typevar_types=None):
if typevar_types is None:
typevar_types = typing.TypeVar
tvars = []
for t in types:
if (
isinstance(t, typevar_types) and
t not in tvars and
not _is_unpack(t)
):
tvars.append(t)
if _should_collect_from_parameters(t):
tvars.extend([t for t in t.__parameters__ if t not in tvars])
return tuple(tvars)
NoReturn = typing.NoReturn
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = typing.TypeVar('T') # Any type.
KT = typing.TypeVar('KT') # Key type.
VT = typing.TypeVar('VT') # Value type.
T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
ClassVar = typing.ClassVar
# On older versions of typing there is an internal class named "Final".
# 3.8+
if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
Final = typing.Final
# 3.7
else:
```
###Assistant : Collect all type variable contained in types in order of
first appearance (lexicographic order). For example::
_collect_type_vars((T, List[S, T])) == (T, S)
|
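Because the snippet above leans on other private helpers (`_is_unpack`, `_should_collect_from_parameters`), it is not runnable on its own. The following self-contained sketch shows the same first-appearance collection idea using only the standard `typing` module; the function name is illustrative and the logic is deliberately simplified:
```python
import typing

def collect_type_vars_demo(types):
    # Collect type variables in order of first appearance (simplified sketch).
    tvars = []
    for t in types:
        if isinstance(t, typing.TypeVar) and t not in tvars:
            tvars.append(t)
        # Generic aliases such as List[S] expose their variables via __parameters__.
        for p in getattr(t, "__parameters__", ()):
            if p not in tvars:
                tvars.append(p)
    return tuple(tvars)

T = typing.TypeVar("T")
S = typing.TypeVar("S")
print(collect_type_vars_demo((T, typing.List[S])))  # (~T, ~S)
```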
1,973 | def from_dataframe(df, allow_copy=True):
if isinstance(df, pd.DataFrame):
return df
if not hasattr(df, "__dataframe__"):
raise ValueError("`df` does not support __dataframe__")
return _from_dataframe(df.__dataframe__(allow_copy=allow_copy))
|
Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol.
Parameters
----------
df : DataFrameXchg
Object supporting the exchange protocol, i.e. `__dataframe__` method.
allow_copy : bool, default: True
Whether to allow copying the memory to perform the conversion
(if false then zero-copy approach is requested).
Returns
-------
pd.DataFrame
| 48 | 20 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def from_dataframe(df, allow_copy=True):
if isinstance(df, pd.DataFrame):
return df
if not hasattr(df, "__dataframe__"):
raise ValueError("`df` does not support __dataframe__")
return _from_dataframe(df.__dataframe__(allow_copy=allow_copy))
```
###Assistant :
Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol.
Parameters
----------
df : DataFrameXchg
Object supporting the exchange protocol, i.e. `__dataframe__` method.
allow_copy : bool, default: True
Whether to allow copying the memory to perform the conversion
(if false then zero-copy approach is requested).
Returns
-------
pd.DataFrame
|
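For context, recent pandas releases expose this entry point publicly as `pandas.api.interchange.from_dataframe`. A minimal round-trip sketch, assuming pandas >= 1.5:
```python
import pandas as pd
from pandas.api.interchange import from_dataframe  # public wrapper in pandas >= 1.5

source = pd.DataFrame({"id": [1, 2, 3], "name": ["a", "b", "c"]})

# A pandas frame takes the fast path shown above and is returned directly;
# any other object is converted through its __dataframe__ method.
result = from_dataframe(source, allow_copy=True)
print(result.equals(source))  # True
```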
1,974 | def parse_wheel(wheel_zip, name):
# type: (ZipFile, str) -> Tuple[str, Message]
try:
info_dir = wheel_dist_info_dir(wheel_zip, name)
metadata = wheel_metadata(wheel_zip, info_dir)
version = wheel_version(metadata)
except UnsupportedWheel as e:
raise UnsupportedWheel("{} has an invalid wheel, {}".format(name, str(e)))
check_compatibility(version, name)
return info_dir, metadata
| Extract information from the provided wheel, ensuring it meets basic
standards.
Returns the name of the .dist-info directory and the parsed WHEEL metadata.
| 23 | 39 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def parse_wheel(wheel_zip, name):
# type: (ZipFile, str) -> Tuple[str, Message]
try:
info_dir = wheel_dist_info_dir(wheel_zip, name)
metadata = wheel_metadata(wheel_zip, info_dir)
version = wheel_version(metadata)
except UnsupportedWheel as e:
raise UnsupportedWheel("{} has an invalid wheel, {}".format(name, str(e)))
check_compatibility(version, name)
return info_dir, metadata
```
###Assistant : Extract information from the provided wheel, ensuring it meets basic
standards.
Returns the name of the .dist-info directory and the parsed WHEEL metadata.
|
1,975 | def style_docstrings_in_code(code, max_len=119):
# fmt: off
splits = code.split('\"\"\"')
splits = [
(s if i % 2 == 0 or _re_doc_ignore.search(splits[i - 1]) is not None else style_docstring(s, max_len=max_len))
for i, s in enumerate(splits)
]
black_errors = "\n\n".join([s[1] for s in splits if isinstance(s, tuple) and len(s[1]) > 0])
splits = [s[0] if isinstance(s, tuple) else s for s in splits]
clean_code = '\"\"\"'.join(splits)
# fmt: on
return clean_code, black_errors
|
Style all docstrings in some code.
Args:
code (`str`): The code in which we want to style the docstrings.
max_len (`int`): The maximum number of characters per line.
Returns:
`Tuple[str, str]`: A tuple with the clean code and the black errors (if any)
| 43 | 70 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def style_docstrings_in_code(code, max_len=119):
# fmt: off
splits = code.split('\"\"\"')
splits = [
(s if i % 2 == 0 or _re_doc_ignore.search(splits[i - 1]) is not None else style_docstring(s, max_len=max_len))
for i, s in enumerate(splits)
]
black_errors = "\n\n".join([s[1] for s in splits if isinstance(s, tuple) and len(s[1]) > 0])
splits = [s[0] if isinstance(s, tuple) else s for s in splits]
clean_code = '\"\"\"'.join(splits)
# fmt: on
return clean_code, black_errors
```
###Assistant :
Style all docstrings in some code.
Args:
code (`str`): The code in which we want to style the docstrings.
max_len (`int`): The maximum number of characters per line.
Returns:
`Tuple[str, str]`: A tuple with the clean code and the black errors (if any)
|
1,976 | def check_version_info(cluster_metadata):
cluster_version_info = (
cluster_metadata["ray_version"],
cluster_metadata["python_version"],
)
version_info = compute_version_info()
if version_info != cluster_version_info:
node_ip_address = ray._private.services.get_node_ip_address()
error_message = (
"Version mismatch: The cluster was started with:\n"
" Ray: " + cluster_version_info[0] + "\n"
" Python: " + cluster_version_info[1] + "\n"
"This process on node " + node_ip_address + " was started with:" + "\n"
" Ray: " + version_info[0] + "\n"
" Python: " + version_info[1] + "\n"
)
raise RuntimeError(error_message)
| Check if the Python and Ray versions stored in GCS match this process.
Args:
cluster_metadata: Ray cluster metadata from GCS.
Raises:
Exception: An exception is raised if there is a version mismatch.
| 32 | 73 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def check_version_info(cluster_metadata):
cluster_version_info = (
cluster_metadata["ray_version"],
cluster_metadata["python_version"],
)
version_info = compute_version_info()
if version_info != cluster_version_info:
node_ip_address = ray._private.services.get_node_ip_address()
error_message = (
"Version mismatch: The cluster was started with:\n"
" Ray: " + cluster_version_info[0] + "\n"
" Python: " + cluster_version_info[1] + "\n"
"This process on node " + node_ip_address + " was started with:" + "\n"
" Ray: " + version_info[0] + "\n"
" Python: " + version_info[1] + "\n"
)
raise RuntimeError(error_message)
```
###Assistant : Check if the Python and Ray versions stored in GCS match this process.
Args:
cluster_metadata: Ray cluster metadata from GCS.
Raises:
Exception: An exception is raised if there is a version mismatch.
|
1,977 | def get_console() -> "Console":
global _console
if _console is None:
from .console import Console
_console = Console()
return _console
| Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console,
and hasn't been explicitly given one.
Returns:
Console: A console instance.
| 25 | 19 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_console() -> "Console":
global _console
if _console is None:
from .console import Console
_console = Console()
return _console
```
###Assistant : Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console,
and hasn't been explicitly given one.
Returns:
Console: A console instance.
|
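A small usage sketch, e.g. from library code that wants to share Rich's module-level console instead of constructing its own:
```python
from rich import get_console

console = get_console()          # lazily created on first call, then reused
console.print("[bold green]ready[/bold green]")
print(get_console() is console)  # True -- the same global instance is returned
```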
1,978 | def is_hash_allowed(self, hashes):
# type: (Optional[Hashes]) -> bool
if hashes is None or not self.has_hash:
return False
# Assert non-None so mypy knows self.hash_name and self.hash are str.
assert self.hash_name is not None
assert self.hash is not None
return hashes.is_hash_allowed(self.hash_name, hex_digest=self.hash)
# TODO: Relax this comparison logic to ignore, for example, fragments. |
Return True if the link has a hash and it is allowed.
| 12 | 52 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def is_hash_allowed(self, hashes):
# type: (Optional[Hashes]) -> bool
if hashes is None or not self.has_hash:
return False
# Assert non-None so mypy knows self.hash_name and self.hash are str.
assert self.hash_name is not None
assert self.hash is not None
return hashes.is_hash_allowed(self.hash_name, hex_digest=self.hash)
# TODO: Relax this comparison logic to ignore, for example, fragments.
```
###Assistant :
Return True if the link has a hash and it is allowed.
|
1,979 | def coverage_error(y_true, y_score, *, sample_weight=None):
y_true = check_array(y_true, ensure_2d=True)
y_score = check_array(y_score, ensure_2d=True)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true, input_name="y_true")
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
| Coverage error measure.
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Note: Our implementation's score is 1 greater than the one given in
Tsoumakas et al., 2010. This extends it to handle the degenerate case
in which an instance has 0 true labels.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : ndarray of shape (n_samples, n_labels)
True binary labels in binary indicator format.
y_score : ndarray of shape (n_samples, n_labels)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
| 179 | 60 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def coverage_error(y_true, y_score, *, sample_weight=None):
y_true = check_array(y_true, ensure_2d=True)
y_score = check_array(y_score, ensure_2d=True)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true, input_name="y_true")
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
```
###Assistant : Coverage error measure.
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Note: Our implementation's score is 1 greater than the one given in
Tsoumakas et al., 2010. This extends it to handle the degenerate case
in which an instance has 0 true labels.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : ndarray of shape (n_samples, n_labels)
True binary labels in binary indicator format.
y_score : ndarray of shape (n_samples, n_labels)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
|
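A small usage sketch of the metric described above. The numbers are a toy example, and the expected value is worked out by hand: 2 labels must be scanned to cover the true label of the first sample, 3 for the second, giving an average of 2.5:
```python
import numpy as np
from sklearn.metrics import coverage_error

y_true = np.array([[1, 0, 0],
                   [0, 0, 1]])
y_score = np.array([[0.75, 0.5, 1.0],
                    [1.0, 0.2, 0.1]])

# Sample 1: the true label scores 0.75 and 2 labels score >= 0.75.
# Sample 2: the true label scores 0.1 and all 3 labels score >= 0.1.
print(coverage_error(y_true, y_score))  # 2.5
```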
1,980 | def total_predict_batches(self) -> int:
return sum(self.trainer.num_predict_batches)
| The total number of prediction batches, which may change from epoch to epoch.
Use this to set the total number of iterations in the progress bar. Can return ``inf`` if the predict dataloader
is of infinite size.
| 37 | 6 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def total_predict_batches(self) -> int:
return sum(self.trainer.num_predict_batches)
```
###Assistant : The total number of prediction batches, which may change from epoch to epoch.
Use this to set the total number of iterations in the progress bar. Can return ``inf`` if the predict dataloader
is of infinite size.
|
1,981 | def shape(self) -> Dict[str, Tuple[int]]:
self._check_values_type()
return {k: dataset.shape for k, dataset in self.items()}
| Shape of each split of the dataset (number of columns, number of rows).
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.shape
{'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)}
```
| 36 | 14 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def shape(self) -> Dict[str, Tuple[int]]:
self._check_values_type()
return {k: dataset.shape for k, dataset in self.items()}
```
###Assistant : Shape of each split of the dataset (number of columns, number of rows).
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.shape
{'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)}
```
|
1,982 | def is_torch_support_available(self) -> bool:
if is_torch_available():
from transformers.file_utils import torch_version
return torch_version >= self.torch_onnx_minimum_version
else:
return False
|
The minimum PyTorch version required to export the model.
Returns:
`bool`: Whether the installed version of PyTorch is compatible with the model.
| 22 | 17 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def is_torch_support_available(self) -> bool:
if is_torch_available():
from transformers.file_utils import torch_version
return torch_version >= self.torch_onnx_minimum_version
else:
return False
```
###Assistant :
The minimum PyTorch version required to export the model.
Returns:
`bool`: Whether the installed version of PyTorch is compatible with the model.
|
1,983 | def check_connection(self, logger, config) -> Tuple[bool, any]:
auth_header = TokenAuthenticator(token=config["api_key"]).get_auth_header()
ping_url = ORB_API_BASE_URL + "ping"
ping_response = requests.get(ping_url, headers=auth_header)
try:
ping_response.raise_for_status()
return True, None
except Exception as e:
return False, e
|
Makes a request to the /ping endpoint, which validates that the authentication credentials are appropriate.
API Docs: https://docs.withorb.com/reference/ping
| 18 | 31 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def check_connection(self, logger, config) -> Tuple[bool, any]:
auth_header = TokenAuthenticator(token=config["api_key"]).get_auth_header()
ping_url = ORB_API_BASE_URL + "ping"
ping_response = requests.get(ping_url, headers=auth_header)
try:
ping_response.raise_for_status()
return True, None
except Exception as e:
return False, e
```
###Assistant :
Makes a request to the /ping endpoint, which validates that the authentication credentials are appropriate.
API Docs: https://docs.withorb.com/reference/ping
|
1,984 | def add_items_upsert(self, content_type_pk, indexers):
compiler = InsertQuery(IndexEntry).get_compiler(connection=self.connection)
title_sql = []
autocomplete_sql = []
body_sql = []
data_params = []
for indexer in indexers:
data_params.extend((content_type_pk, indexer.id))
# Compile title value
value = compiler.prepare_value(
IndexEntry._meta.get_field("title"), indexer.title
)
sql, params = value.as_sql(compiler, self.connection)
title_sql.append(sql)
data_params.extend(params)
# Compile autocomplete value
value = compiler.prepare_value(
IndexEntry._meta.get_field("autocomplete"), indexer.autocomplete
)
sql, params = value.as_sql(compiler, self.connection)
autocomplete_sql.append(sql)
data_params.extend(params)
# Compile body value
value = compiler.prepare_value(
IndexEntry._meta.get_field("body"), indexer.body
)
sql, params = value.as_sql(compiler, self.connection)
body_sql.append(sql)
data_params.extend(params)
data_sql = ", ".join(
[
"(%%s, %%s, %s, %s, %s, 1.0)" % (a, b, c)
for a, b, c in zip(title_sql, autocomplete_sql, body_sql)
]
)
with self.connection.cursor() as cursor:
cursor.execute(
% (IndexEntry._meta.db_table, data_sql),
data_params,
)
self._refresh_title_norms()
|
INSERT INTO %s (content_type_id, object_id, title, autocomplete, body, title_norm)
(VALUES %s)
ON CONFLICT (content_type_id, object_id)
DO UPDATE SET title = EXCLUDED.title,
title_norm = 1.0,
autocomplete = EXCLUDED.autocomplete,
body = EXCLUDED.body
| 30 | 112 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def add_items_upsert(self, content_type_pk, indexers):
compiler = InsertQuery(IndexEntry).get_compiler(connection=self.connection)
title_sql = []
autocomplete_sql = []
body_sql = []
data_params = []
for indexer in indexers:
data_params.extend((content_type_pk, indexer.id))
# Compile title value
value = compiler.prepare_value(
IndexEntry._meta.get_field("title"), indexer.title
)
sql, params = value.as_sql(compiler, self.connection)
title_sql.append(sql)
data_params.extend(params)
# Compile autocomplete value
value = compiler.prepare_value(
IndexEntry._meta.get_field("autocomplete"), indexer.autocomplete
)
sql, params = value.as_sql(compiler, self.connection)
autocomplete_sql.append(sql)
data_params.extend(params)
# Compile body value
value = compiler.prepare_value(
IndexEntry._meta.get_field("body"), indexer.body
)
sql, params = value.as_sql(compiler, self.connection)
body_sql.append(sql)
data_params.extend(params)
data_sql = ", ".join(
[
"(%%s, %%s, %s, %s, %s, 1.0)" % (a, b, c)
for a, b, c in zip(title_sql, autocomplete_sql, body_sql)
]
)
with self.connection.cursor() as cursor:
cursor.execute(
% (IndexEntry._meta.db_table, data_sql),
data_params,
)
self._refresh_title_norms()
```
###Assistant :
INSERT INTO %s (content_type_id, object_id, title, autocomplete, body, title_norm)
(VALUES %s)
ON CONFLICT (content_type_id, object_id)
DO UPDATE SET title = EXCLUDED.title,
title_norm = 1.0,
autocomplete = EXCLUDED.autocomplete,
body = EXCLUDED.body
|
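The statement built above follows the standard PostgreSQL multi-row upsert pattern. A stripped-down sketch of the same idea with plain DB-API placeholders; the table name, columns and sample rows here are assumptions for illustration, and the execute call is left commented out:
```python
# Rows of (content_type_id, object_id, title, autocomplete, body) -- illustrative data.
rows = [
    (1, 10, "Home", "home page", "welcome to the home page"),
    (1, 11, "About", "about us", "company history and team"),
]

values_sql = ", ".join(["(%s, %s, %s, %s, %s, 1.0)"] * len(rows))
params = [value for row in rows for value in row]

sql = (
    "INSERT INTO search_indexentry "  # assumed table name
    "(content_type_id, object_id, title, autocomplete, body, title_norm) "
    f"VALUES {values_sql} "
    "ON CONFLICT (content_type_id, object_id) DO UPDATE SET "
    "title = EXCLUDED.title, title_norm = 1.0, "
    "autocomplete = EXCLUDED.autocomplete, body = EXCLUDED.body"
)
print(sql)
# cursor.execute(sql, params)  # with e.g. a psycopg2 cursor
```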
1,985 | def test_already_created_plus_written_results(indexer, indexer_cache) -> None:
org_id = 1234
raw_indexer = indexer
indexer = CachingIndexer(indexer_cache, indexer)
v0 = raw_indexer.record(use_case_id, org_id, "v1.2.0")
v1 = raw_indexer.record(use_case_id, org_id, "v1.2.1")
v2 = raw_indexer.record(use_case_id, org_id, "v1.2.2")
expected_mapping = {"v1.2.0": v0, "v1.2.1": v1, "v1.2.2": v2}
results = indexer.bulk_record(
use_case_id=use_case_id, org_strings={org_id: {"v1.2.0", "v1.2.1", "v1.2.2"}}
)
assert len(results[org_id]) == len(expected_mapping) == 3
for string, id in results[org_id].items():
assert expected_mapping[string] == id
results = indexer.bulk_record(
use_case_id=use_case_id,
org_strings={org_id: {"v1.2.0", "v1.2.1", "v1.2.2", "v1.2.3"}},
)
v3 = raw_indexer.resolve(use_case_id, org_id, "v1.2.3")
expected_mapping["v1.2.3"] = v3
assert len(results[org_id]) == len(expected_mapping) == 4
for string, id in results[org_id].items():
assert expected_mapping[string] == id
fetch_meta = results.get_fetch_metadata()
assert_fetch_type_for_tag_string_set(
fetch_meta[org_id], FetchType.CACHE_HIT, {"v1.2.0", "v1.2.1", "v1.2.2"}
)
assert_fetch_type_for_tag_string_set(fetch_meta[org_id], FetchType.FIRST_SEEN, {"v1.2.3"})
|
Test that we correctly combine db read results with db write results
for the same organization.
| 16 | 108 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_already_created_plus_written_results(indexer, indexer_cache) -> None:
org_id = 1234
raw_indexer = indexer
indexer = CachingIndexer(indexer_cache, indexer)
v0 = raw_indexer.record(use_case_id, org_id, "v1.2.0")
v1 = raw_indexer.record(use_case_id, org_id, "v1.2.1")
v2 = raw_indexer.record(use_case_id, org_id, "v1.2.2")
expected_mapping = {"v1.2.0": v0, "v1.2.1": v1, "v1.2.2": v2}
results = indexer.bulk_record(
use_case_id=use_case_id, org_strings={org_id: {"v1.2.0", "v1.2.1", "v1.2.2"}}
)
assert len(results[org_id]) == len(expected_mapping) == 3
for string, id in results[org_id].items():
assert expected_mapping[string] == id
results = indexer.bulk_record(
use_case_id=use_case_id,
org_strings={org_id: {"v1.2.0", "v1.2.1", "v1.2.2", "v1.2.3"}},
)
v3 = raw_indexer.resolve(use_case_id, org_id, "v1.2.3")
expected_mapping["v1.2.3"] = v3
assert len(results[org_id]) == len(expected_mapping) == 4
for string, id in results[org_id].items():
assert expected_mapping[string] == id
fetch_meta = results.get_fetch_metadata()
assert_fetch_type_for_tag_string_set(
fetch_meta[org_id], FetchType.CACHE_HIT, {"v1.2.0", "v1.2.1", "v1.2.2"}
)
assert_fetch_type_for_tag_string_set(fetch_meta[org_id], FetchType.FIRST_SEEN, {"v1.2.3"})
```
###Assistant :
Test that we correctly combine db read results with db write results
for the same organization.
|
1,986 | def compiler_fixup(compiler_so, cc_args):
stripArch = stripSysroot = False
compiler_so = list(compiler_so)
if not _supports_universal_builds():
# OSX before 10.4.0, these don't support -arch and -isysroot at
# all.
stripArch = stripSysroot = True
else:
stripArch = '-arch' in cc_args
stripSysroot = any(arg for arg in cc_args if arg.startswith('-isysroot'))
if stripArch or 'ARCHFLAGS' in os.environ:
while True:
try:
index = compiler_so.index('-arch')
# Strip this argument and the next one:
del compiler_so[index:index+2]
except ValueError:
break
elif not _supports_arm64_builds():
# Look for "-arch arm64" and drop that
for idx in reversed(range(len(compiler_so))):
if compiler_so[idx] == '-arch' and compiler_so[idx+1] == "arm64":
del compiler_so[idx:idx+2]
if 'ARCHFLAGS' in os.environ and not stripArch:
# User specified different -arch flags in the environ,
# see also distutils.sysconfig
compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()
if stripSysroot:
while True:
indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')]
if not indices:
break
index = indices[0]
if compiler_so[index] == '-isysroot':
# Strip this argument and the next one:
del compiler_so[index:index+2]
else:
# It's '-isysroot/some/path' in one arg
del compiler_so[index:index+1]
# Check if the SDK that is used during compilation actually exists,
# the universal build requires the usage of a universal SDK and not all
# users have that installed by default.
sysroot = None
argvar = cc_args
indices = [i for i,x in enumerate(cc_args) if x.startswith('-isysroot')]
if not indices:
argvar = compiler_so
indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')]
for idx in indices:
if argvar[idx] == '-isysroot':
sysroot = argvar[idx+1]
break
else:
sysroot = argvar[idx][len('-isysroot'):]
break
if sysroot and not os.path.isdir(sysroot):
sys.stderr.write(f"Compiling with an SDK that doesn't seem to exist: {sysroot}\n")
sys.stderr.write("Please check your Xcode installation\n")
sys.stderr.flush()
return compiler_so
|
This function will strip '-isysroot PATH' and '-arch ARCH' from the
compile flags if the user has specified one of them in extra_compile_flags.
This is needed because '-arch ARCH' adds another architecture to the
build, without a way to remove an architecture. Furthermore GCC will
barf if multiple '-isysroot' arguments are present.
| 51 | 268 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def compiler_fixup(compiler_so, cc_args):
stripArch = stripSysroot = False
compiler_so = list(compiler_so)
if not _supports_universal_builds():
# OSX before 10.4.0, these don't support -arch and -isysroot at
# all.
stripArch = stripSysroot = True
else:
stripArch = '-arch' in cc_args
stripSysroot = any(arg for arg in cc_args if arg.startswith('-isysroot'))
if stripArch or 'ARCHFLAGS' in os.environ:
while True:
try:
index = compiler_so.index('-arch')
# Strip this argument and the next one:
del compiler_so[index:index+2]
except ValueError:
break
elif not _supports_arm64_builds():
# Look for "-arch arm64" and drop that
for idx in reversed(range(len(compiler_so))):
if compiler_so[idx] == '-arch' and compiler_so[idx+1] == "arm64":
del compiler_so[idx:idx+2]
if 'ARCHFLAGS' in os.environ and not stripArch:
# User specified different -arch flags in the environ,
# see also distutils.sysconfig
compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()
if stripSysroot:
while True:
indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')]
if not indices:
break
index = indices[0]
if compiler_so[index] == '-isysroot':
# Strip this argument and the next one:
del compiler_so[index:index+2]
else:
# It's '-isysroot/some/path' in one arg
del compiler_so[index:index+1]
# Check if the SDK that is used during compilation actually exists,
# the universal build requires the usage of a universal SDK and not all
# users have that installed by default.
sysroot = None
argvar = cc_args
indices = [i for i,x in enumerate(cc_args) if x.startswith('-isysroot')]
if not indices:
argvar = compiler_so
indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')]
for idx in indices:
if argvar[idx] == '-isysroot':
sysroot = argvar[idx+1]
break
else:
sysroot = argvar[idx][len('-isysroot'):]
break
if sysroot and not os.path.isdir(sysroot):
sys.stderr.write(f"Compiling with an SDK that doesn't seem to exist: {sysroot}\n")
sys.stderr.write("Please check your Xcode installation\n")
sys.stderr.flush()
return compiler_so
```
###Assistant :
This function will strip '-isysroot PATH' and '-arch ARCH' from the
compile flags if the user has specified one of them in extra_compile_flags.
This is needed because '-arch ARCH' adds another architecture to the
build, without a way to remove an architecture. Furthermore GCC will
barf if multiple '-isysroot' arguments are present.
|
1,987 | def test_copy_page_with_excluded_parental_and_child_relations(self):
try:
# modify excluded fields for this test
EventPage.exclude_fields_in_copy = [
"advert_placements",
"categories",
"signup_link",
]
# set up data
christmas_event = EventPage.objects.get(url_path="/home/events/christmas/")
summer_category = EventCategory.objects.create(name="Summer")
holiday_category = EventCategory.objects.create(name="Holidays")
# add URL (to test excluding a basic field)
christmas_event.signup_link = "https://christmas-is-awesome.com/rsvp"
# add parental many to many relations
christmas_event.categories = (summer_category, holiday_category)
christmas_event.save()
# Copy it
new_christmas_event = christmas_event.copy(
update_attrs={
"title": "New christmas event",
"slug": "new-christmas-event",
}
)
# check that the signup_link was NOT copied
self.assertEqual(
christmas_event.signup_link, "https://christmas-is-awesome.com/rsvp"
)
self.assertEqual(new_christmas_event.signup_link, "")
# check that original event is untouched
self.assertEqual(
christmas_event.categories.count(),
2,
"Child objects (parental many to many) defined on the superclass were removed from the original page",
)
# check that parental many to many are NOT copied
self.assertEqual(
new_christmas_event.categories.count(),
0,
"Child objects (parental many to many) were copied but should be excluded",
)
# check that child objects on original event were left untouched
self.assertEqual(
christmas_event.advert_placements.count(),
1,
"Child objects defined on the original superclass were edited when copied",
)
# check that child objects were NOT copied
self.assertEqual(
new_christmas_event.advert_placements.count(),
0,
"Child objects defined on the superclass were copied and should not be",
)
finally:
# reset excluded fields for future tests
EventPage.exclude_fields_in_copy = []
| Test that a page will be copied with parental and child relations removed if excluded. | 15 | 197 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_copy_page_with_excluded_parental_and_child_relations(self):
try:
# modify excluded fields for this test
EventPage.exclude_fields_in_copy = [
"advert_placements",
"categories",
"signup_link",
]
# set up data
christmas_event = EventPage.objects.get(url_path="/home/events/christmas/")
summer_category = EventCategory.objects.create(name="Summer")
holiday_category = EventCategory.objects.create(name="Holidays")
# add URL (to test excluding a basic field)
christmas_event.signup_link = "https://christmas-is-awesome.com/rsvp"
# add parental many to many relations
christmas_event.categories = (summer_category, holiday_category)
christmas_event.save()
# Copy it
new_christmas_event = christmas_event.copy(
update_attrs={
"title": "New christmas event",
"slug": "new-christmas-event",
}
)
# check that the signup_link was NOT copied
self.assertEqual(
christmas_event.signup_link, "https://christmas-is-awesome.com/rsvp"
)
self.assertEqual(new_christmas_event.signup_link, "")
# check that original event is untouched
self.assertEqual(
christmas_event.categories.count(),
2,
"Child objects (parental many to many) defined on the superclass were removed from the original page",
)
# check that parental many to many are NOT copied
self.assertEqual(
new_christmas_event.categories.count(),
0,
"Child objects (parental many to many) were copied but should be excluded",
)
# check that child objects on original event were left untouched
self.assertEqual(
christmas_event.advert_placements.count(),
1,
"Child objects defined on the original superclass were edited when copied",
)
# check that child objects were NOT copied
self.assertEqual(
new_christmas_event.advert_placements.count(),
0,
"Child objects defined on the superclass were copied and should not be",
)
finally:
# reset excluded fields for future tests
EventPage.exclude_fields_in_copy = []
```
###Assistant : Test that a page will be copied with parental and child relations removed if excluded.
|
1,988 | def test_app_model_in_list_body_class(self):
response = self.client.get(reverse("admin:admin_views_section_changelist"))
self.assertContains(response, '<body class=" app-admin_views model-section ')
|
Ensure app and model tag are correctly read by change_list template
| 11 | 11 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_app_model_in_list_body_class(self):
response = self.client.get(reverse("admin:admin_views_section_changelist"))
self.assertContains(response, '<body class=" app-admin_views model-section ')
```
###Assistant :
Ensure app and model tag are correctly read by change_list template
|
1,989 | def version_parts(best=False):
# type: (bool) -> Tuple[str, str, str]
return _distro.version_parts(best)
|
Return the version of the current OS distribution as a tuple
``(major, minor, build_number)`` with items as follows:
* ``major``: The result of :func:`distro.major_version`.
* ``minor``: The result of :func:`distro.minor_version`.
* ``build_number``: The result of :func:`distro.build_number`.
For a description of the *best* parameter, see the :func:`distro.version`
method.
| 47 | 11 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def version_parts(best=False):
# type: (bool) -> Tuple[str, str, str]
return _distro.version_parts(best)
```
###Assistant :
Return the version of the current OS distribution as a tuple
``(major, minor, build_number)`` with items as follows:
* ``major``: The result of :func:`distro.major_version`.
* ``minor``: The result of :func:`distro.minor_version`.
* ``build_number``: The result of :func:`distro.build_number`.
For a description of the *best* parameter, see the :func:`distro.version`
method.
|
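Usage goes through the third-party `distro` package; the printed values naturally depend on the host OS (a minimal sketch):
```python
import distro  # third-party "distro" package

major, minor, build = distro.version_parts(best=True)
print(f"major={major!r} minor={minor!r} build={build!r}")
# e.g. major='22' minor='04' build='' on Ubuntu 22.04
```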
1,990 | def check_status(self):
status = {
'success': False
}
try:
con = self.__connect()
with closing(con) as con:
#TODO: best way to check con.connected ?
status['success'] = True
except Exception as e:
log.error(f'Error connecting to SQL Server {self.database}, {e}!')
status['error'] = e
return status
|
Check the connection of the SQL Server database
:return: success status and error message if error occurs
| 17 | 42 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def check_status(self):
status = {
'success': False
}
try:
con = self.__connect()
with closing(con) as con:
#TODO: best way to check con.connected ?
status['success'] = True
except Exception as e:
log.error(f'Error connecting to SQL Server {self.database}, {e}!')
status['error'] = e
return status
```
###Assistant :
Check the connection of the SQL Server database
:return: success status and error message if error occurs
|
1,991 | def model_from_config(config, custom_objects=None):
if isinstance(config, list):
raise TypeError(
"`model_from_config` expects a dictionary, not a list. "
f"Received: config={config}. Did you meant to use "
"`Sequential.from_config(config)`?"
)
from keras.layers import deserialize # pylint: disable=g-import-not-at-top
return deserialize(config, custom_objects=custom_objects)
@keras_export("keras.models.model_from_yaml") | Instantiates a Keras model from its config.
Usage:
```
# for a Functional API model
tf.keras.Model().from_config(model.get_config())
# for a Sequential model
tf.keras.Sequential().from_config(model.get_config())
```
Args:
config: Configuration dictionary.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
Returns:
A Keras model instance (uncompiled).
Raises:
TypeError: if `config` is not a dictionary.
| 57 | 37 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def model_from_config(config, custom_objects=None):
if isinstance(config, list):
raise TypeError(
"`model_from_config` expects a dictionary, not a list. "
f"Received: config={config}. Did you meant to use "
"`Sequential.from_config(config)`?"
)
from keras.layers import deserialize # pylint: disable=g-import-not-at-top
return deserialize(config, custom_objects=custom_objects)
@keras_export("keras.models.model_from_yaml")
```
###Assistant : Instantiates a Keras model from its config.
Usage:
```
# for a Functional API model
tf.keras.Model().from_config(model.get_config())
# for a Sequential model
tf.keras.Sequential().from_config(model.get_config())
```
Args:
config: Configuration dictionary.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
Returns:
A Keras model instance (uncompiled).
Raises:
TypeError: if `config` is not a dictionary.
|
1,992 | def test_golden_path(self):
with self.assertNumQueries(0):
result = self.page.cached_content_type
self.assertEqual(result, ContentType.objects.get(id=self.page.content_type_id))
|
The return value should match the value you'd get
if fetching the ContentType from the database,
and shouldn't trigger any database queries when
the ContentType is already in memory.
| 29 | 9 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_golden_path(self):
with self.assertNumQueries(0):
result = self.page.cached_content_type
self.assertEqual(result, ContentType.objects.get(id=self.page.content_type_id))
```
###Assistant :
The return value should match the value you'd get
if fetching the ContentType from the database,
and shouldn't trigger any database queries when
the ContentType is already in memory.
|
1,993 | def fold_function_name(function_name):
parts = function_name.split(".")
if len(parts) == 1:
return function_name
tail = parts.pop()
grouped = [list(g) for _, g in groupby(parts)]
|
Fold multiple consecutive occurrences of the same property name into a single group, excluding the last component.
foo | foo
foo.foo | foo.foo
foo.foo.foo | {foo#2}.foo
bar.foo.foo | bar.foo.foo
bar.foo.foo.foo | bar.{foo#2}.foo
bar.foo.foo.onError | bar.{foo#2}.onError
bar.bar.bar.foo.foo.onError | {bar#3}.{foo#2}.onError
bar.foo.foo.bar.bar.onError | bar.{foo#2}.{bar#2}.onError
| 41 | 22 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def fold_function_name(function_name):
parts = function_name.split(".")
if len(parts) == 1:
return function_name
tail = parts.pop()
grouped = [list(g) for _, g in groupby(parts)]
```
###Assistant :
Fold multiple consecutive occurrences of the same property name into a single group, excluding the last component.
foo | foo
foo.foo | foo.foo
foo.foo.foo | {foo#2}.foo
bar.foo.foo | bar.foo.foo
bar.foo.foo.foo | bar.{foo#2}.foo
bar.foo.foo.onError | bar.{foo#2}.onError
bar.bar.bar.foo.foo.onError | {bar#3}.{foo#2}.onError
bar.foo.foo.bar.bar.onError | bar.{foo#2}.{bar#2}.onError
|
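The snippet above is cut off before the folding itself, so here is a self-contained sketch that reproduces the table in the docstring. The `{name#count}` formatting is inferred from those examples rather than taken from the original source, and the function name is illustrative:
```python
from itertools import groupby

def fold_function_name_demo(function_name):
    # Collapse consecutive runs of the same dotted component, keeping the last part verbatim.
    parts = function_name.split(".")
    if len(parts) == 1:
        return function_name
    tail = parts.pop()

    def fold(run):
        name, count = run[0], len(run)
        return name if count == 1 else f"{{{name}#{count}}}"

    grouped = [list(g) for _, g in groupby(parts)]
    return ".".join(fold(run) for run in grouped) + "." + tail

print(fold_function_name_demo("bar.foo.foo.onError"))          # bar.{foo#2}.onError
print(fold_function_name_demo("bar.bar.bar.foo.foo.onError"))  # {bar#3}.{foo#2}.onError
```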
1,994 | def _laplace_rule_exp(f, t, s, doit=True, **hints):
hints.pop('simplify', True)
a = Wild('a', exclude=[t])
y = Wild('y')
z = Wild('z')
k, func = f.as_independent(t, as_Add=False)
ma1 = func.match(exp(y)*z)
if ma1:
ma2 = ma1[y].collect(t).match(a*t)
if ma2:
debug('_laplace_apply_rules match:')
debug(' f: %s ( %s, %s )'%(f, ma1, ma2))
debug(' rule: multiply with exp (1.5)')
L = _laplace_apply_rules(ma1[z], t, s-ma2[a], doit=doit, **hints)
try:
r, p, c = L
return (k*r, p+ma2[a], c)
except TypeError:
return k*L
return None
|
This internal helper function tries to transform a product containing the
`exp` function and returns `None` if it cannot do it.
| 21 | 73 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _laplace_rule_exp(f, t, s, doit=True, **hints):
hints.pop('simplify', True)
a = Wild('a', exclude=[t])
y = Wild('y')
z = Wild('z')
k, func = f.as_independent(t, as_Add=False)
ma1 = func.match(exp(y)*z)
if ma1:
ma2 = ma1[y].collect(t).match(a*t)
if ma2:
debug('_laplace_apply_rules match:')
debug(' f: %s ( %s, %s )'%(f, ma1, ma2))
debug(' rule: multiply with exp (1.5)')
L = _laplace_apply_rules(ma1[z], t, s-ma2[a], doit=doit, **hints)
try:
r, p, c = L
return (k*r, p+ma2[a], c)
except TypeError:
return k*L
return None
```
###Assistant :
This internal helper function tries to transform a product containing the
`exp` function and returns `None` if it cannot do it.
|
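The rule implemented above is the frequency-shift property L{exp(a*t)*f(t)} = F(s - a). A quick SymPy check of that property on a concrete function; the positivity assumptions on the symbols are only there to keep the transform simple:
```python
from sympy import symbols, exp, sin, laplace_transform

t, s, a = symbols("t s a", positive=True)

print(laplace_transform(sin(t), t, s, noconds=True))             # 1/(s**2 + 1)
print(laplace_transform(exp(a*t) * sin(t), t, s, noconds=True))  # same expression with s -> s - a
```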
1,995 | def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
for i in xrange(startpos, len(line)):
if line[i] == startchar:
depth += 1
elif line[i] == endchar:
depth -= 1
if depth == 0:
return (i + 1, 0)
return (-1, depth)
| Find the position just after the matching endchar.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
depth: nesting level at startpos.
startchar: expression opening character.
endchar: expression closing character.
Returns:
On finding matching endchar: (index just after matching endchar, 0)
Otherwise: (-1, new depth at end of this line)
| 52 | 37 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
for i in xrange(startpos, len(line)):
if line[i] == startchar:
depth += 1
elif line[i] == endchar:
depth -= 1
if depth == 0:
return (i + 1, 0)
return (-1, depth)
```
###Assistant : Find the position just after the matching endchar.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
depth: nesting level at startpos.
startchar: expression opening character.
endchar: expression closing character.
Returns:
On finding matching endchar: (index just after matching endchar, 0)
Otherwise: (-1, new depth at end of this line)
|
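The function above comes from cpplint and relies on the `xrange` alias that file defines, so a direct call needs that context. A self-contained Python 3 sketch of the same depth-tracking logic, with an illustrative call:
```python
def find_end_of_expression_demo(line, startpos, depth, startchar, endchar):
    # Scan forward, tracking nesting depth, until the matching closer is found.
    for i in range(startpos, len(line)):
        if line[i] == startchar:
            depth += 1
        elif line[i] == endchar:
            depth -= 1
            if depth == 0:
                return (i + 1, 0)
    return (-1, depth)

line = "foo(bar(baz), qux) + 1"
# Start just after the '(' at index 3, so the current depth is 1.
print(find_end_of_expression_demo(line, 4, 1, "(", ")"))  # (18, 0)
```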
1,996 | def test_toy_example_collapse_points():
rng = np.random.RandomState(42)
input_dim = 5
two_points = rng.randn(2, input_dim)
X = np.vstack([two_points, two_points.mean(axis=0)[np.newaxis, :]])
y = [0, 0, 1]
| Test on a toy example of three points that should collapse
We build a simple example: two points from the same class and a point from
a different class in the middle of them. On this simple example, the new
(transformed) points should all collapse into one single point. Indeed, the
objective is 2/(1 + exp(d/2)), with d the euclidean distance between the
two samples from the same class. This is maximized for d=0 (because d>=0),
with an objective equal to 1 (loss=-1.).
| 83 | 22 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_toy_example_collapse_points():
rng = np.random.RandomState(42)
input_dim = 5
two_points = rng.randn(2, input_dim)
X = np.vstack([two_points, two_points.mean(axis=0)[np.newaxis, :]])
y = [0, 0, 1]
```
###Assistant : Test on a toy example of three points that should collapse
We build a simple example: two points from the same class and a point from
a different class in the middle of them. On this simple example, the new
(transformed) points should all collapse into one single point. Indeed, the
objective is 2/(1 + exp(d/2)), with d the euclidean distance between the
two samples from the same class. This is maximized for d=0 (because d>=0),
with an objective equal to 1 (loss=-1.).
|
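Editorial note: the docstring in the row above states that the objective 2/(1 + exp(d/2)) is maximized at d = 0 with value 1 (loss = -1). The short check below is not part of the dataset row; it only confirms that arithmetic numerically.
```Python
# Quick numeric check (not a dataset row) of the stated objective.
import math

def objective(d):
    return 2.0 / (1.0 + math.exp(d / 2.0))

print(objective(0.0))   # 1.0 at d = 0, the maximum
print(objective(1.0))   # ~0.755, strictly smaller for any d > 0
```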
1,997 | def _get_string_indexer_log_records(caplog):
return [
(
rec.message,
{
k: v
for k, v in rec.__dict__.items()
if k
in (
"string_type",
"is_global_quota",
"num_global_quotas",
"num_global_quotas",
"org_batch_size",
)
},
)
for rec in caplog.records
]
|
Get all log records and relevant extra arguments for easy snapshotting.
| 11 | 31 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _get_string_indexer_log_records(caplog):
return [
(
rec.message,
{
k: v
for k, v in rec.__dict__.items()
if k
in (
"string_type",
"is_global_quota",
"num_global_quotas",
"num_global_quotas",
"org_batch_size",
)
},
)
for rec in caplog.records
]
```
###Assistant :
Get all log records and relevant extra arguments for easy snapshotting.
|
1,998 | async def test_registered_pin_required(hass, user_form):
with patch(MOCK_API_CONNECT, return_value=True), patch(
MOCK_API_DEVICE_REGISTERED, new_callable=PropertyMock
) as mock_device_registered, patch(MOCK_API_IS_PIN_REQUIRED, return_value=True):
mock_device_registered.return_value = True
await hass.config_entries.flow.async_configure(
user_form["flow_id"], user_input=TEST_CREDS
)
| Test if the device is already registered and PIN required. | 10 | 23 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
async def test_registered_pin_required(hass, user_form):
with patch(MOCK_API_CONNECT, return_value=True), patch(
MOCK_API_DEVICE_REGISTERED, new_callable=PropertyMock
) as mock_device_registered, patch(MOCK_API_IS_PIN_REQUIRED, return_value=True):
mock_device_registered.return_value = True
await hass.config_entries.flow.async_configure(
user_form["flow_id"], user_input=TEST_CREDS
)
```
###Assistant : Test if the device is already registered and PIN required.
|
1,999 | def test_send_push_multiple_workers(self):
http_client_mock1 = Mock(spec_set=["post_json_get_json"])
http_client_mock1.post_json_get_json.side_effect = (
lambda *_, **__: defer.succeed({})
)
self.make_worker_hs(
"synapse.app.generic_worker",
{
"worker_name": "pusher1",
"pusher_instances": ["pusher1", "pusher2"],
},
proxied_blacklisted_http_client=http_client_mock1,
)
http_client_mock2 = Mock(spec_set=["post_json_get_json"])
http_client_mock2.post_json_get_json.side_effect = (
lambda *_, **__: defer.succeed({})
)
self.make_worker_hs(
"synapse.app.generic_worker",
{
"worker_name": "pusher2",
"pusher_instances": ["pusher1", "pusher2"],
},
proxied_blacklisted_http_client=http_client_mock2,
)
# We choose a user name that we know should go to pusher1.
event_id = self._create_pusher_and_send_msg("user2")
# Advance time a bit, so the pusher will register something has happened
self.pump()
http_client_mock1.post_json_get_json.assert_called_once()
http_client_mock2.post_json_get_json.assert_not_called()
self.assertEqual(
http_client_mock1.post_json_get_json.call_args[0][0],
"https://push.example.com/_matrix/push/v1/notify",
)
self.assertEqual(
event_id,
http_client_mock1.post_json_get_json.call_args[0][1]["notification"][
"event_id"
],
)
http_client_mock1.post_json_get_json.reset_mock()
http_client_mock2.post_json_get_json.reset_mock()
# Now we choose a user name that we know should go to pusher2.
event_id = self._create_pusher_and_send_msg("user4")
# Advance time a bit, so the pusher will register something has happened
self.pump()
http_client_mock1.post_json_get_json.assert_not_called()
http_client_mock2.post_json_get_json.assert_called_once()
self.assertEqual(
http_client_mock2.post_json_get_json.call_args[0][0],
"https://push.example.com/_matrix/push/v1/notify",
)
self.assertEqual(
event_id,
http_client_mock2.post_json_get_json.call_args[0][1]["notification"][
"event_id"
],
)
| Test that registration works when using sharded pusher workers. | 9 | 133 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_send_push_multiple_workers(self):
http_client_mock1 = Mock(spec_set=["post_json_get_json"])
http_client_mock1.post_json_get_json.side_effect = (
lambda *_, **__: defer.succeed({})
)
self.make_worker_hs(
"synapse.app.generic_worker",
{
"worker_name": "pusher1",
"pusher_instances": ["pusher1", "pusher2"],
},
proxied_blacklisted_http_client=http_client_mock1,
)
http_client_mock2 = Mock(spec_set=["post_json_get_json"])
http_client_mock2.post_json_get_json.side_effect = (
lambda *_, **__: defer.succeed({})
)
self.make_worker_hs(
"synapse.app.generic_worker",
{
"worker_name": "pusher2",
"pusher_instances": ["pusher1", "pusher2"],
},
proxied_blacklisted_http_client=http_client_mock2,
)
# We choose a user name that we know should go to pusher1.
event_id = self._create_pusher_and_send_msg("user2")
# Advance time a bit, so the pusher will register something has happened
self.pump()
http_client_mock1.post_json_get_json.assert_called_once()
http_client_mock2.post_json_get_json.assert_not_called()
self.assertEqual(
http_client_mock1.post_json_get_json.call_args[0][0],
"https://push.example.com/_matrix/push/v1/notify",
)
self.assertEqual(
event_id,
http_client_mock1.post_json_get_json.call_args[0][1]["notification"][
"event_id"
],
)
http_client_mock1.post_json_get_json.reset_mock()
http_client_mock2.post_json_get_json.reset_mock()
# Now we choose a user name that we know should go to pusher2.
event_id = self._create_pusher_and_send_msg("user4")
# Advance time a bit, so the pusher will register something has happened
self.pump()
http_client_mock1.post_json_get_json.assert_not_called()
http_client_mock2.post_json_get_json.assert_called_once()
self.assertEqual(
http_client_mock2.post_json_get_json.call_args[0][0],
"https://push.example.com/_matrix/push/v1/notify",
)
self.assertEqual(
event_id,
http_client_mock2.post_json_get_json.call_args[0][1]["notification"][
"event_id"
],
)
```
###Assistant : Test that registration works when using sharded pusher workers.
|