Dataset schema (column name, dtype, observed range):
- Unnamed: 0 (int64): 0 to 2.93k
- code (string): lengths 101 to 62.2k
- docs (string): lengths 51 to 10.7k
- doc_len (int64): 4 to 1.74k
- words (int64): 4 to 4.82k
- lang (string): 1 class ("Python")
- prompt (string): lengths 320 to 71.2k
2,100
def _imaginary_unit_as_coefficient(arg): if getattr(arg, 'is_real', True): return None else: return arg.as_coefficient(S.ImaginaryUnit) @sympify_method_args
Helper to extract symbolic coefficient for imaginary unit
8
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _imaginary_unit_as_coefficient(arg): if getattr(arg, 'is_real', True): return None else: return arg.as_coefficient(S.ImaginaryUnit) @sympify_method_args ``` ###Assistant : Helper to extract symbolic coefficient for imaginary unit
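For orientation, a minimal sketch of how this helper behaves under a standard SymPy install (the example inputs are illustrative, not taken from the dataset):

```Python
from sympy import I, S, symbols

def _imaginary_unit_as_coefficient(arg):
    # A quantity known to be real has no imaginary-unit coefficient.
    if getattr(arg, 'is_real', True):
        return None
    else:
        return arg.as_coefficient(S.ImaginaryUnit)

x = symbols('x', real=True)
print(_imaginary_unit_as_coefficient(3 * I))  # 3
print(_imaginary_unit_as_coefficient(x))     # None
```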
2,101
def from_config(cls, config, custom_objects=None): if "learning_rate" in config: if isinstance(config["learning_rate"], dict): config["learning_rate"] = learning_rate_schedule.deserialize( config["learning_rate"], custom_objects=custom_objects ) return cls(**config)
Creates an optimizer from its config. This method is the reverse of `get_config`, capable of instantiating the same optimizer from the config dictionary. Args: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional user-defined Python objects needed to recreate this optimizer. Returns: An optimizer instance.
53
19
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def from_config(cls, config, custom_objects=None): if "learning_rate" in config: if isinstance(config["learning_rate"], dict): config["learning_rate"] = learning_rate_schedule.deserialize( config["learning_rate"], custom_objects=custom_objects ) return cls(**config) ``` ###Assistant : Creates an optimizer from its config. This method is the reverse of `get_config`, capable of instantiating the same optimizer from the config dictionary. Args: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional user-defined Python objects needed to recreate this optimizer. Returns: An optimizer instance.
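A hedged sketch of the round trip this method reverses, assuming a standard `tf.keras` install (the optimizer class and learning rate are illustrative):

```Python
import tensorflow as tf

# get_config() captures the optimizer's hyperparameters as a plain dict.
opt = tf.keras.optimizers.Adam(learning_rate=1e-3)
config = opt.get_config()

# from_config() rebuilds an equivalent optimizer from that dict; a
# learning-rate schedule stored as a nested dict is deserialized first.
restored = tf.keras.optimizers.Adam.from_config(config)
print(type(restored).__name__)  # Adam
```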
2,102
def test_generate_pdf_from_mail(self): mail = self.parser.get_parsed(os.path.join(self.SAMPLE_FILES, "html.eml")) pdf_path = os.path.join(self.parser.tempdir, "html.eml.pdf") with open(pdf_path, "wb") as file: file.write(self.parser.generate_pdf_from_mail(mail)) converted = os.path.join( self.parser.tempdir, "html.eml.pdf.webp", ) run_convert( density=300, scale="500x5000>", alpha="remove", strip=True, trim=False, auto_orient=True, input_file=f"{pdf_path}", # Do not define an index to convert all pages. output_file=converted, logging_group=None, ) self.assertTrue(os.path.isfile(converted)) thumb_hash = self.imagehash(converted) # The created pdf is not reproducible. But the converted image should always look the same. expected_hash = self.imagehash( os.path.join(self.SAMPLE_FILES, "html.eml.pdf.webp"), ) self.assertEqual( thumb_hash, expected_hash, f"PDF looks different. Check if {converted} looks weird.", )
GIVEN: - Fresh start WHEN: - PDF generation from a simple eml file is requested THEN: - Gotenberg is called and the resulting file is returned and looks as expected.
29
81
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_generate_pdf_from_mail(self): mail = self.parser.get_parsed(os.path.join(self.SAMPLE_FILES, "html.eml")) pdf_path = os.path.join(self.parser.tempdir, "html.eml.pdf") with open(pdf_path, "wb") as file: file.write(self.parser.generate_pdf_from_mail(mail)) converted = os.path.join( self.parser.tempdir, "html.eml.pdf.webp", ) run_convert( density=300, scale="500x5000>", alpha="remove", strip=True, trim=False, auto_orient=True, input_file=f"{pdf_path}", # Do not define an index to convert all pages. output_file=converted, logging_group=None, ) self.assertTrue(os.path.isfile(converted)) thumb_hash = self.imagehash(converted) # The created pdf is not reproducible. But the converted image should always look the same. expected_hash = self.imagehash( os.path.join(self.SAMPLE_FILES, "html.eml.pdf.webp"), ) self.assertEqual( thumb_hash, expected_hash, f"PDF looks different. Check if {converted} looks weird.", ) ``` ###Assistant : GIVEN: - Fresh start WHEN: - PDF generation from a simple eml file is requested THEN: - Gotenberg is called and the resulting file is returned and looks as expected.
2,103
def new(self, degree=0, color=None): (center_x, center_y, angle, inner_radius, outer_radius, outer_color, pointer_color, origin_color, line_width) = self.all pointer_color = color or pointer_color if self.figure != []: for figure in self.figure: self.graph_elem.DeleteFigure(figure) self.figure = [] d = degree - 90 self.all[2] = degree dx1 = int(2 * inner_radius * math.sin(d / 180 * math.pi)) dy1 = int(2 * inner_radius * math.cos(d / 180 * math.pi)) dx2 = int(outer_radius * math.sin(d / 180 * math.pi)) dy2 = int(outer_radius * math.cos(d / 180 * math.pi)) self.figure.append(self.graph_elem.DrawLine((center_x - dx1, center_y - dy1), (center_x + dx2, center_y + dy2), color=pointer_color, width=line_width)) self.figure.append(self.graph_elem.DrawCircle((center_x, center_y), inner_radius, fill_color=origin_color, line_color=outer_color, line_width=line_width))
Draw a new pointer at the given angle, erasing the old pointer if one exists. The degree is measured clockwise from the negative x-axis.
17
100
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def new(self, degree=0, color=None): (center_x, center_y, angle, inner_radius, outer_radius, outer_color, pointer_color, origin_color, line_width) = self.all pointer_color = color or pointer_color if self.figure != []: for figure in self.figure: self.graph_elem.DeleteFigure(figure) self.figure = [] d = degree - 90 self.all[2] = degree dx1 = int(2 * inner_radius * math.sin(d / 180 * math.pi)) dy1 = int(2 * inner_radius * math.cos(d / 180 * math.pi)) dx2 = int(outer_radius * math.sin(d / 180 * math.pi)) dy2 = int(outer_radius * math.cos(d / 180 * math.pi)) self.figure.append(self.graph_elem.DrawLine((center_x - dx1, center_y - dy1), (center_x + dx2, center_y + dy2), color=pointer_color, width=line_width)) self.figure.append(self.graph_elem.DrawCircle((center_x, center_y), inner_radius, fill_color=origin_color, line_color=outer_color, line_width=line_width)) ``` ###Assistant : Draw a new pointer at the given angle, erasing the old pointer if one exists. The degree is measured clockwise from the negative x-axis.
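The trigonometry in `new` can be isolated into a small helper; a sketch under the docstring's convention (the standalone function and its name are hypothetical):

```Python
import math

def pointer_endpoints(center_x, center_y, degree, inner_radius, outer_radius):
    # Shift by 90 degrees so that 0 points along the negative x-axis
    # and angles grow clockwise, matching the docstring's convention.
    d = math.radians(degree - 90)
    tail = (center_x - int(2 * inner_radius * math.sin(d)),
            center_y - int(2 * inner_radius * math.cos(d)))
    tip = (center_x + int(outer_radius * math.sin(d)),
           center_y + int(outer_radius * math.cos(d)))
    return tail, tip

print(pointer_endpoints(100, 100, 0, 10, 50))
```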
2,104
def postprocess(paths, images, data_out, score_thresh, label_names, output_dir, handle_id, visualization=True): results = data_out.copy_to_cpu() lod = data_out.lod()[0] check_dir(output_dir) if paths: assert type(paths) is list, "type(paths) is not list." if handle_id < len(paths): unhandled_paths = paths[handle_id:] unhandled_paths_num = len(unhandled_paths) else: unhandled_paths_num = 0 if images is not None: if handle_id < len(images): unhandled_paths = None unhandled_paths_num = len(images) - handle_id else: unhandled_paths_num = 0 output = list() for index in range(len(lod) - 1): output_i = {'data': []} if unhandled_paths and index < unhandled_paths_num: org_img_path = unhandled_paths[index] org_img = Image.open(org_img_path) else: org_img = images[index - unhandled_paths_num] org_img = org_img.astype(np.uint8) org_img = Image.fromarray(org_img[:, :, ::-1]) if visualization: org_img_path = get_save_image_name(org_img, output_dir, 'image_numpy_{}'.format((handle_id + index))) org_img.save(org_img_path) org_img_height = org_img.height org_img_width = org_img.width result_i = results[lod[index]:lod[index + 1]] for row in result_i: if len(row) != 6: continue if row[1] < score_thresh: continue category_id = int(row[0]) confidence = row[1] bbox = row[2:] dt = {} dt['label'] = label_names[category_id] dt['confidence'] = float(confidence) dt['left'], dt['top'], dt['right'], dt['bottom'] = clip_bbox(bbox, org_img_width, org_img_height) output_i['data'].append(dt) output.append(output_i) if visualization: output_i['save_path'] = draw_bounding_box_on_image(org_img_path, output_i['data'], output_dir) return output
Postprocess the lod_tensor produced by Executor.run Args: paths (list[str]): The paths of images. images (list(numpy.ndarray)): images data, shape of each is [H, W, C] data_out (lod_tensor): data output of predictor. output_dir (str): The path to store output images. visualization (bool): Whether to save image or not. score_thresh (float): the lower limit of bounding box scores. label_names (list[str]): label names. handle_id (int): The number of images that have been handled. Returns: res (list[dict]): The result of vehicle detection. keys include 'data', 'save_path', the corresponding value is: data (dict): the result of object detection, keys include 'left', 'top', 'right', 'bottom', 'label', 'confidence', the corresponding value is: left (float): The X coordinate of the upper left corner of the bounding box; top (float): The Y coordinate of the upper left corner of the bounding box; right (float): The X coordinate of the lower right corner of the bounding box; bottom (float): The Y coordinate of the lower right corner of the bounding box; label (str): The label of detection result; confidence (float): The confidence of detection result. save_path (str): The path to save output images.
181
172
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def postprocess(paths, images, data_out, score_thresh, label_names, output_dir, handle_id, visualization=True): results = data_out.copy_to_cpu() lod = data_out.lod()[0] check_dir(output_dir) if paths: assert type(paths) is list, "type(paths) is not list." if handle_id < len(paths): unhandled_paths = paths[handle_id:] unhandled_paths_num = len(unhandled_paths) else: unhandled_paths_num = 0 if images is not None: if handle_id < len(images): unhandled_paths = None unhandled_paths_num = len(images) - handle_id else: unhandled_paths_num = 0 output = list() for index in range(len(lod) - 1): output_i = {'data': []} if unhandled_paths and index < unhandled_paths_num: org_img_path = unhandled_paths[index] org_img = Image.open(org_img_path) else: org_img = images[index - unhandled_paths_num] org_img = org_img.astype(np.uint8) org_img = Image.fromarray(org_img[:, :, ::-1]) if visualization: org_img_path = get_save_image_name(org_img, output_dir, 'image_numpy_{}'.format((handle_id + index))) org_img.save(org_img_path) org_img_height = org_img.height org_img_width = org_img.width result_i = results[lod[index]:lod[index + 1]] for row in result_i: if len(row) != 6: continue if row[1] < score_thresh: continue category_id = int(row[0]) confidence = row[1] bbox = row[2:] dt = {} dt['label'] = label_names[category_id] dt['confidence'] = float(confidence) dt['left'], dt['top'], dt['right'], dt['bottom'] = clip_bbox(bbox, org_img_width, org_img_height) output_i['data'].append(dt) output.append(output_i) if visualization: output_i['save_path'] = draw_bounding_box_on_image(org_img_path, output_i['data'], output_dir) return output ``` ###Assistant : Postprocess the lod_tensor produced by Executor.run Args: paths (list[str]): The paths of images. images (list(numpy.ndarray)): images data, shape of each is [H, W, C] data_out (lod_tensor): data output of predictor. output_dir (str): The path to store output images. visualization (bool): Whether to save image or not. score_thresh (float): the lower limit of bounding box scores. label_names (list[str]): label names. handle_id (int): The number of images that have been handled. Returns: res (list[dict]): The result of vehicle detection. keys include 'data', 'save_path', the corresponding value is: data (dict): the result of object detection, keys include 'left', 'top', 'right', 'bottom', 'label', 'confidence', the corresponding value is: left (float): The X coordinate of the upper left corner of the bounding box; top (float): The Y coordinate of the upper left corner of the bounding box; right (float): The X coordinate of the lower right corner of the bounding box; bottom (float): The Y coordinate of the lower right corner of the bounding box; label (str): The label of detection result; confidence (float): The confidence of detection result. save_path (str): The path to save output images.
2,105
def series_with_multilevel_index() -> Series: arrays = [ ["bar", "bar", "baz", "baz", "qux", "qux", "foo", "foo"], ["one", "two", "one", "two", "one", "two", "one", "two"], ] tuples = zip(*arrays) index = MultiIndex.from_tuples(tuples) data = np.random.randn(8) ser = Series(data, index=index) ser[3] = np.NaN return ser _narrow_series = { f"{dtype.__name__}-series": tm.make_rand_series(name="a", dtype=dtype) for dtype in tm.NARROW_NP_DTYPES } _index_or_series_objs = {**indices_dict, **_series, **_narrow_series} @pytest.fixture(params=_index_or_series_objs.keys())
Fixture with a Series with a 2-level MultiIndex.
8
59
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def series_with_multilevel_index() -> Series: arrays = [ ["bar", "bar", "baz", "baz", "qux", "qux", "foo", "foo"], ["one", "two", "one", "two", "one", "two", "one", "two"], ] tuples = zip(*arrays) index = MultiIndex.from_tuples(tuples) data = np.random.randn(8) ser = Series(data, index=index) ser[3] = np.NaN return ser _narrow_series = { f"{dtype.__name__}-series": tm.make_rand_series(name="a", dtype=dtype) for dtype in tm.NARROW_NP_DTYPES } _index_or_series_objs = {**indices_dict, **_series, **_narrow_series} @pytest.fixture(params=_index_or_series_objs.keys()) ``` ###Assistant : Fixture with a Series with a 2-level MultiIndex.
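A minimal sketch of building and indexing such a fixture with stock pandas (values are arbitrary):

```Python
import numpy as np
import pandas as pd

arrays = [["bar", "bar", "baz", "baz"], ["one", "two", "one", "two"]]
index = pd.MultiIndex.from_tuples(list(zip(*arrays)))
ser = pd.Series(np.random.randn(4), index=index)

# Partial indexing on the first level returns the inner-level sub-series.
print(ser["bar"])
```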
2,106
def _async_stop(self) -> None: if self._async_unsub_state_changed: self._async_unsub_state_changed() self._async_unsub_state_changed = None
Unregister the group from Home Assistant. This method must be run in the event loop.
15
10
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _async_stop(self) -> None: if self._async_unsub_state_changed: self._async_unsub_state_changed() self._async_unsub_state_changed = None ``` ###Assistant : Unregister the group from Home Assistant. This method must be run in the event loop.
2,107
def test_database_connection_url(generate_test_database_connection_url): url = generate_test_database_connection_url if url is None: yield None else: # TODO: https://github.com/PrefectHQ/orion/issues/2045 # Also temporarily override the environment variable, so that child # subprocesses that we spin off are correctly configured as well original_envvar = os.environ.get("PREFECT_ORION_DATABASE_CONNECTION_URL") os.environ["PREFECT_ORION_DATABASE_CONNECTION_URL"] = url with temporary_settings({PREFECT_ORION_DATABASE_CONNECTION_URL: url}): yield url os.environ["PREFECT_ORION_DATABASE_CONNECTION_URL"] = original_envvar @pytest.fixture(scope="session")
Update the setting for the database connection url to the generated value from `generate_test_database_connection_url` This _must_ be separate from the generation of the test url because async fixtures are run in a separate context from the test suite.
38
51
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_database_connection_url(generate_test_database_connection_url): url = generate_test_database_connection_url if url is None: yield None else: # TODO: https://github.com/PrefectHQ/orion/issues/2045 # Also temporarily override the environment variable, so that child # subprocesses that we spin off are correctly configured as well original_envvar = os.environ.get("PREFECT_ORION_DATABASE_CONNECTION_URL") os.environ["PREFECT_ORION_DATABASE_CONNECTION_URL"] = url with temporary_settings({PREFECT_ORION_DATABASE_CONNECTION_URL: url}): yield url os.environ["PREFECT_ORION_DATABASE_CONNECTION_URL"] = original_envvar @pytest.fixture(scope="session") ``` ###Assistant : Update the setting for the database connection url to the generated value from `generate_test_database_connection_url` This _must_ be separate from the generation of the test url because async fixtures are run in a separate context from the test suite.
2,108
def define_by_run_func(trial) -> Optional[Dict[str, Any]]: # This param is not used in the objective function. activation = trial.suggest_categorical("activation", ["relu", "tanh"]) trial.suggest_float("width", 0, 20) trial.suggest_float("height", -100, 100) # Define-by-run allows for conditional search spaces. if activation == "relu": trial.suggest_float("mult", 1, 2) # Return all constants in a dictionary. return {"steps": 100}
Define-by-run function to create the search space. Ensure no actual computation takes place here. That should go into the trainable passed to ``Tuner`` (in this example, that's ``easy_objective``). For more information, see https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/002_configurations.html. This function should either return None or a dict with constant values.
46
50
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def define_by_run_func(trial) -> Optional[Dict[str, Any]]: # This param is not used in the objective function. activation = trial.suggest_categorical("activation", ["relu", "tanh"]) trial.suggest_float("width", 0, 20) trial.suggest_float("height", -100, 100) # Define-by-run allows for conditional search spaces. if activation == "relu": trial.suggest_float("mult", 1, 2) # Return all constants in a dictionary. return {"steps": 100} ``` ###Assistant : Define-by-run function to create the search space. Ensure no actual computation takes place here. That should go into the trainable passed to ``Tuner`` (in this example, that's ``easy_objective``). For more information, see https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/002_configurations.html. This function should either return None or a dict with constant values.
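The same define-by-run idea in plain Optuna, as a hedged sketch (this objective is made up; in the documented snippet the returned dict of constants is consumed by the tuning framework instead):

```Python
import optuna

def objective(trial):
    # Parameters are declared while the function runs, so later
    # suggestions can depend on earlier ones (a conditional space).
    activation = trial.suggest_categorical("activation", ["relu", "tanh"])
    width = trial.suggest_float("width", 0, 20)
    if activation == "relu":
        width *= trial.suggest_float("mult", 1, 2)
    return width

study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=10)
print(study.best_params)
```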
2,109
def _compile_to_sklearn(self, expr): sklearn_pipeline_str = generate_pipeline_code( expr_to_tree(expr, self._pset), self.operators ) sklearn_pipeline = eval(sklearn_pipeline_str, self.operators_context) sklearn_pipeline.memory = self._memory if self.random_state: # Fix random state when the operator allows set_param_recursive( sklearn_pipeline.steps, "random_state", self.random_state ) return sklearn_pipeline
Compile a DEAP pipeline into a sklearn pipeline. Parameters ---------- expr: DEAP individual The DEAP pipeline to be compiled Returns ------- sklearn_pipeline: sklearn.pipeline.Pipeline
23
34
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _compile_to_sklearn(self, expr): sklearn_pipeline_str = generate_pipeline_code( expr_to_tree(expr, self._pset), self.operators ) sklearn_pipeline = eval(sklearn_pipeline_str, self.operators_context) sklearn_pipeline.memory = self._memory if self.random_state: # Fix random state when the operator allows set_param_recursive( sklearn_pipeline.steps, "random_state", self.random_state ) return sklearn_pipeline ``` ###Assistant : Compile a DEAP pipeline into a sklearn pipeline. Parameters ---------- expr: DEAP individual The DEAP pipeline to be compiled Returns ------- sklearn_pipeline: sklearn.pipeline.Pipeline
2,110
def equals(self, other, failing_expression=False): if self.shape != getattr(other, 'shape', None): return False rv = True for i in range(self.rows): for j in range(self.cols): ans = self[i, j].equals(other[i, j], failing_expression) if ans is False: return False elif ans is not True and rv is True: rv = ans return rv
Applies ``equals`` to corresponding elements of the matrices, trying to prove that the elements are equivalent, returning True if they are, False if any pair is not, and None (or the first failing expression if failing_expression is True) if it cannot be decided if the expressions are equivalent or not. This is, in general, an expensive operation. Examples ======== >>> from sympy import Matrix >>> from sympy.abc import x >>> A = Matrix([x*(x - 1), 0]) >>> B = Matrix([x**2 - x, 0]) >>> A == B False >>> A.simplify() == B.simplify() True >>> A.equals(B) True >>> A.equals(2) False See Also ======== sympy.core.expr.Expr.equals
103
49
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def equals(self, other, failing_expression=False): if self.shape != getattr(other, 'shape', None): return False rv = True for i in range(self.rows): for j in range(self.cols): ans = self[i, j].equals(other[i, j], failing_expression) if ans is False: return False elif ans is not True and rv is True: rv = ans return rv ``` ###Assistant : Applies ``equals`` to corresponding elements of the matrices, trying to prove that the elements are equivalent, returning True if they are, False if any pair is not, and None (or the first failing expression if failing_expression is True) if it cannot be decided if the expressions are equivalent or not. This is, in general, an expensive operation. Examples ======== >>> from sympy import Matrix >>> from sympy.abc import x >>> A = Matrix([x*(x - 1), 0]) >>> B = Matrix([x**2 - x, 0]) >>> A == B False >>> A.simplify() == B.simplify() True >>> A.equals(B) True >>> A.equals(2) False See Also ======== sympy.core.expr.Expr.equals
2,111
def load(self, loader): loader.add_option( "block_list", Sequence[str], [], )
Block matching requests and return an empty response with the specified HTTP status. Option syntax is "/flow-filter/status-code", where flow-filter describes which requests this rule should be applied to and status-code is the HTTP status code to return for blocked requests. The separator ("/" in the example) can be any character. Setting a non-standard status code of 444 will close the connection without sending a response.
65
8
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load(self, loader): loader.add_option( "block_list", Sequence[str], [], ) ``` ###Assistant : Block matching requests and return an empty response with the specified HTTP status. Option syntax is "/flow-filter/status-code", where flow-filter describes which requests this rule should be applied to and status-code is the HTTP status code to return for blocked requests. The separator ("/" in the example) can be any character. Setting a non-standard status code of 444 will close the connection without sending a response.
2,112
def train(self, tagged_docs): m1 = [self.source_lang_vec.dv[item.tags].flatten() for item in tagged_docs] m2 = [self.target_lang_vec.dv[item.tags].flatten() for item in tagged_docs] self.translation_matrix = np.linalg.lstsq(m2, m1, -1)[0] return self.translation_matrix
Build the translation matrix to map from the source model's vectors to target model's vectors Parameters ---------- tagged_docs : list of :class:`~gensim.models.doc2vec.TaggedDocument`, Documents that will be used for training, both the source language document vector and target language document vector trained on those tagged documents. Returns ------- numpy.ndarray Translation matrix that maps from the source model's vectors to target model's vectors.
61
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def train(self, tagged_docs): m1 = [self.source_lang_vec.dv[item.tags].flatten() for item in tagged_docs] m2 = [self.target_lang_vec.dv[item.tags].flatten() for item in tagged_docs] self.translation_matrix = np.linalg.lstsq(m2, m1, -1)[0] return self.translation_matrix ``` ###Assistant : Build the translation matrix to map from the source model's vectors to target model's vectors Parameters ---------- tagged_docs : list of :class:`~gensim.models.doc2vec.TaggedDocument`, Documents that will be used for training, both the source language document vector and target language document vector trained on those tagged documents. Returns ------- numpy.ndarray Translation matrix that maps from the source model's vectors to target model's vectors.
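At its core, `train` is one least-squares solve; a self-contained sketch with random stand-in vectors (shapes are illustrative):

```Python
import numpy as np

rng = np.random.default_rng(0)
m1 = rng.normal(size=(100, 50))  # stand-in for source-language doc vectors
m2 = rng.normal(size=(100, 50))  # stand-in for target-language doc vectors

# Solve m2 @ W ~= m1 in the least-squares sense, mirroring
# np.linalg.lstsq(m2, m1, -1)[0] in the snippet above.
W, *_ = np.linalg.lstsq(m2, m1, rcond=None)
print(W.shape)  # (50, 50)
```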
2,113
def serialize(input, tree="etree", encoding=None, **serializer_opts): # XXX: Should we cache this? walker = treewalkers.getTreeWalker(tree) s = HTMLSerializer(**serializer_opts) return s.render(walker(input), encoding)
Serializes the input token stream using the specified treewalker :arg input: the token stream to serialize :arg tree: the treewalker to use :arg encoding: the encoding to use :arg serializer_opts: any options to pass to the :py:class:`html5lib.serializer.HTMLSerializer` that gets created :returns: the tree serialized as a string Example: >>> from html5lib.html5parser import parse >>> from html5lib.serializer import serialize >>> token_stream = parse('<html><body><p>Hi!</p></body></html>') >>> serialize(token_stream, omit_optional_tags=False) '<html><head></head><body><p>Hi!</p></body></html>'
66
20
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def serialize(input, tree="etree", encoding=None, **serializer_opts): # XXX: Should we cache this? walker = treewalkers.getTreeWalker(tree) s = HTMLSerializer(**serializer_opts) return s.render(walker(input), encoding) ``` ###Assistant : Serializes the input token stream using the specified treewalker :arg input: the token stream to serialize :arg tree: the treewalker to use :arg encoding: the encoding to use :arg serializer_opts: any options to pass to the :py:class:`html5lib.serializer.HTMLSerializer` that gets created :returns: the tree serialized as a string Example: >>> from html5lib.html5parser import parse >>> from html5lib.serializer import serialize >>> token_stream = parse('<html><body><p>Hi!</p></body></html>') >>> serialize(token_stream, omit_optional_tags=False) '<html><head></head><body><p>Hi!</p></body></html>'
2,114
def set_client_cli_parser(parser=None): if not parser: from jina.parsers.base import set_base_parser parser = set_base_parser() from jina.parsers.peapods.runtimes.remote import mixin_client_gateway_parser from jina.parsers.client import ( mixin_client_features_parser, mixin_comm_protocol_parser, ) mixin_client_gateway_parser(parser) mixin_client_features_parser(parser) mixin_comm_protocol_parser(parser) return parser
Set the parser for the cli client :param parser: an optional existing parser to build upon :return: the parser
19
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_client_cli_parser(parser=None): if not parser: from jina.parsers.base import set_base_parser parser = set_base_parser() from jina.parsers.peapods.runtimes.remote import mixin_client_gateway_parser from jina.parsers.client import ( mixin_client_features_parser, mixin_comm_protocol_parser, ) mixin_client_gateway_parser(parser) mixin_client_features_parser(parser) mixin_comm_protocol_parser(parser) return parser ``` ###Assistant : Set the parser for the cli client :param parser: an optional existing parser to build upon :return: the parser
2,115
def send_sale_toggle_notification(info, instance, catalogue): manager = info.context.plugins now = datetime.now(pytz.utc) start_date = instance.start_date end_date = instance.end_date if (start_date and start_date <= now) and (not end_date or not end_date <= now): manager.sale_toggle(instance, catalogue) instance.notification_sent_datetime = now instance.save(update_fields=["notification_sent_datetime"])
Send a notification about starting or ending sale if it hasn't been sent yet. Send the notification when the start date is before the current date and the sale is not already finished.
33
36
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def send_sale_toggle_notification(info, instance, catalogue): manager = info.context.plugins now = datetime.now(pytz.utc) start_date = instance.start_date end_date = instance.end_date if (start_date and start_date <= now) and (not end_date or not end_date <= now): manager.sale_toggle(instance, catalogue) instance.notification_sent_datetime = now instance.save(update_fields=["notification_sent_datetime"]) ``` ###Assistant : Send a notification about starting or ending sale if it hasn't been sent yet. Send the notification when the start date is before the current date and the sale is not already finished.
2,116
def bcoo_todense(data, indices, *, spinfo): return bcoo_todense_p.bind(jnp.asarray(data), jnp.asarray(indices), spinfo=spinfo) @bcoo_todense_p.def_impl
Convert batched sparse matrix to a dense matrix. Args: data : array of shape ``batch_dims + (nse,) + block_dims``. indices : array of shape ``batch_dims + (n_sparse, nse)`` spinfo : BCOOInfo. In particular, this includes the shape of the matrix, which is equal to ``batch_dims + sparse_dims + block_dims`` where ``len(sparse_dims) == n_sparse`` Returns: mat : array with specified shape and dtype matching ``data``
64
10
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def bcoo_todense(data, indices, *, spinfo): return bcoo_todense_p.bind(jnp.asarray(data), jnp.asarray(indices), spinfo=spinfo) @bcoo_todense_p.def_impl ``` ###Assistant : Convert batched sparse matrix to a dense matrix. Args: data : array of shape ``batch_dims + (nse,) + block_dims``. indices : array of shape ``batch_dims + (n_sparse, nse)`` spinfo : BCOOInfo. In particular, this includes the shape of the matrix, which is equal to ``batch_dims + sparse_dims + block_dims`` where ``len(sparse_dims) == n_sparse`` Returns: mat : array with specified shape and dtype matching ``data``
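For orientation, the public wrapper around this primitive can be exercised through `jax.experimental.sparse`; a sketch (the module layout may differ across JAX versions):

```Python
import jax.numpy as jnp
from jax.experimental import sparse

dense = jnp.array([[0., 1., 0.],
                   [2., 0., 3.]])
mat = sparse.BCOO.fromdense(dense)

# data holds the nse stored values; indices holds their sparse coordinates.
print(mat.data, mat.indices)
print(mat.todense())  # round-trips back to the dense array
```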
2,117
def ignore_ray_errors(self) -> Iterator[ResultOrError]: return self._Iterator( [r for r in self.result_or_errors if not isinstance(r.get(), RayError)] )
Return an iterator over the results, skipping only Ray errors. Similar to ignore_errors, but only skips errors raised by the Ray framework. This is useful for applications that want to handle errors from user code differently.
36
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ignore_ray_errors(self) -> Iterator[ResultOrError]: return self._Iterator( [r for r in self.result_or_errors if not isinstance(r.get(), RayError)] ) ``` ###Assistant : Return an iterator over the results, skipping only Ray errors. Similar to ignore_errors, but only skips errors raised by the Ray framework. This is useful for applications that want to handle errors from user code differently.
2,118
def certificate_was_accepted(self) -> bool: if not self.is_overridable(): return False if self._certificate_accepted is None: raise ValueError("No decision taken yet") return self._certificate_accepted @dataclasses.dataclass
Check whether the certificate was accepted by the user.
9
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def certificate_was_accepted(self) -> bool: if not self.is_overridable(): return False if self._certificate_accepted is None: raise ValueError("No decision taken yet") return self._certificate_accepted @dataclasses.dataclass ``` ###Assistant : Check whether the certificate was accepted by the user.
2,119
def mask(self, row_indices, col_indices): return ( self.force_materialization() .list_of_partitions_to_combine[0] .mask(row_indices, col_indices) )
Create (synchronously) a mask that extracts the indices provided. Parameters ---------- row_indices : list-like, slice or label The row labels for the rows to extract. col_indices : list-like, slice or label The column labels for the columns to extract. Returns ------- PandasOnRayDataframeVirtualPartition A new ``PandasOnRayDataframeVirtualPartition`` object, materialized.
47
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mask(self, row_indices, col_indices): return ( self.force_materialization() .list_of_partitions_to_combine[0] .mask(row_indices, col_indices) ) ``` ###Assistant : Create (synchronously) a mask that extracts the indices provided. Parameters ---------- row_indices : list-like, slice or label The row labels for the rows to extract. col_indices : list-like, slice or label The column labels for the columns to extract. Returns ------- PandasOnRayDataframeVirtualPartition A new ``PandasOnRayDataframeVirtualPartition`` object, materialized.
2,120
def left_integral3D(facets, index, expr, vertices, hp_param, degree): value = S.Zero facet = facets[index] x0 = vertices[facet[0]] facet_len = len(facet) for i, fac in enumerate(facet): side = (vertices[fac], vertices[facet[(i + 1) % facet_len]]) value += distance_to_side(x0, side, hp_param[0]) * \ lineseg_integrate(facet, i, side, expr, degree) return value
Computes the left integral of Eq 10 in Chin et al. Explanation =========== For the 3D case, this is the sum of the integral values over constituting line segments of the face (which is accessed by facets[index]) multiplied by the distance between the first point of facet and that line segment. Parameters ========== facets : List of faces of the 3-Polytope. index : Index of face over which integral is to be calculated. expr : Input polynomial. vertices : List of vertices that constitute the 3-Polytope. hp_param : The hyperplane parameters of the face. degree : Degree of the ``expr``. Examples ======== >>> from sympy.integrals.intpoly import left_integral3D >>> cube = [[(0, 0, 0), (0, 0, 5), (0, 5, 0), (0, 5, 5), (5, 0, 0),\ (5, 0, 5), (5, 5, 0), (5, 5, 5)],\ [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0],\ [3, 1, 0, 2], [0, 4, 6, 2]] >>> facets = cube[1:] >>> vertices = cube[0] >>> left_integral3D(facets, 3, 1, vertices, ([0, -1, 0], -5), 0) -50
177
46
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def left_integral3D(facets, index, expr, vertices, hp_param, degree): value = S.Zero facet = facets[index] x0 = vertices[facet[0]] facet_len = len(facet) for i, fac in enumerate(facet): side = (vertices[fac], vertices[facet[(i + 1) % facet_len]]) value += distance_to_side(x0, side, hp_param[0]) * \ lineseg_integrate(facet, i, side, expr, degree) return value ``` ###Assistant : Computes the left integral of Eq 10 in Chin et al. Explanation =========== For the 3D case, this is the sum of the integral values over constituting line segments of the face (which is accessed by facets[index]) multiplied by the distance between the first point of facet and that line segment. Parameters ========== facets : List of faces of the 3-Polytope. index : Index of face over which integral is to be calculated. expr : Input polynomial. vertices : List of vertices that constitute the 3-Polytope. hp_param : The hyperplane parameters of the face. degree : Degree of the ``expr``. Examples ======== >>> from sympy.integrals.intpoly import left_integral3D >>> cube = [[(0, 0, 0), (0, 0, 5), (0, 5, 0), (0, 5, 5), (5, 0, 0),\ (5, 0, 5), (5, 5, 0), (5, 5, 5)],\ [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0],\ [3, 1, 0, 2], [0, 4, 6, 2]] >>> facets = cube[1:] >>> vertices = cube[0] >>> left_integral3D(facets, 3, 1, vertices, ([0, -1, 0], -5), 0) -50
2,121
def xdropout(self, inputs): mask = tf.cast( 1 - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)), tf.bool, ) scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32) if self.drop_prob > 0: inputs = tf.where(mask, 0.0, inputs) * scale
Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/(1 - drop_prob).
17
32
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def xdropout(self, inputs): mask = tf.cast( 1 - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)), tf.bool, ) scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32) if self.drop_prob > 0: inputs = tf.where(mask, 0.0, inputs) * scale ``` ###Assistant : Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/(1 - drop_prob).
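The scaling trick (inverted dropout) is easy to verify in plain NumPy; a minimal sketch:

```Python
import numpy as np

def inverted_dropout(x, drop_prob, rng=np.random.default_rng(0)):
    # Zero elements with probability drop_prob, then scale survivors by
    # 1/(1 - drop_prob) so the expected activation is unchanged.
    mask = rng.random(x.shape) >= drop_prob
    return np.where(mask, x, 0.0) / (1.0 - drop_prob)

x = np.ones(100_000)
print(inverted_dropout(x, 0.3).mean())  # close to 1.0
```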
2,122
def test_deltas_that_melt_dataframes(self): deltas = self._get_deltas_that_melt_dataframes() for delta in deltas: el = delta(DATAFRAME) el._legacy_add_rows(NEW_ROWS) df_proto = _get_data_frame(self.get_delta_from_queue()) # Test that the add_rows delta is properly melted rows = df_proto.data.cols[0].int64s.data self.assertEqual([2, 3, 4, 2, 3, 4], rows)
Some element types require that their dataframes are 'melted' (https://pandas.pydata.org/docs/reference/api/pandas.melt.html) before being sent to the frontend. Test that the melting occurs.
21
35
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_deltas_that_melt_dataframes(self): deltas = self._get_deltas_that_melt_dataframes() for delta in deltas: el = delta(DATAFRAME) el._legacy_add_rows(NEW_ROWS) df_proto = _get_data_frame(self.get_delta_from_queue()) # Test that the add_rows delta is properly melted rows = df_proto.data.cols[0].int64s.data self.assertEqual([2, 3, 4, 2, 3, 4], rows) ``` ###Assistant : Some element types require that their dataframes are 'melted' (https://pandas.pydata.org/docs/reference/api/pandas.melt.html) before being sent to the frontend. Test that the melting occurs.
2,123
def sensors_temperatures(): ret = collections.defaultdict(list) basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*') # CentOS has an intermediate /device directory: # https://github.com/giampaolo/psutil/issues/971 # https://github.com/nicolargo/glances/issues/1060 basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*')) basenames = sorted(set([x.split('_')[0] for x in basenames])) # Only add the coretemp hwmon entries if they're not already in # /sys/class/hwmon/ # https://github.com/giampaolo/psutil/issues/1708 # https://github.com/giampaolo/psutil/pull/1648 basenames2 = glob.glob( '/sys/devices/platform/coretemp.*/hwmon/hwmon*/temp*_*') repl = re.compile('/sys/devices/platform/coretemp.*/hwmon/') for name in basenames2: altname = repl.sub('/sys/class/hwmon/', name) if altname not in basenames: basenames.append(name) for base in basenames: try: path = base + '_input' current = float(bcat(path)) / 1000.0 path = os.path.join(os.path.dirname(base), 'name') unit_name = cat(path).strip() except (IOError, OSError, ValueError): # A lot of things can go wrong here, so let's just skip the # whole entry. Sure thing is Linux's /sys/class/hwmon really # is a stinky broken mess. # https://github.com/giampaolo/psutil/issues/1009 # https://github.com/giampaolo/psutil/issues/1101 # https://github.com/giampaolo/psutil/issues/1129 # https://github.com/giampaolo/psutil/issues/1245 # https://github.com/giampaolo/psutil/issues/1323 continue high = bcat(base + '_max', fallback=None) critical = bcat(base + '_crit', fallback=None) label = cat(base + '_label', fallback='').strip() if high is not None: try: high = float(high) / 1000.0 except ValueError: high = None if critical is not None: try: critical = float(critical) / 1000.0 except ValueError: critical = None ret[unit_name].append((label, current, high, critical)) # Indication that no sensors were detected in /sys/class/hwmon/ if not basenames: basenames = glob.glob('/sys/class/thermal/thermal_zone*') basenames = sorted(set(basenames)) for base in basenames: try: path = os.path.join(base, 'temp') current = float(bcat(path)) / 1000.0 path = os.path.join(base, 'type') unit_name = cat(path).strip() except (IOError, OSError, ValueError) as err: debug(err) continue trip_paths = glob.glob(base + '/trip_point*') trip_points = set(['_'.join( os.path.basename(p).split('_')[0:3]) for p in trip_paths]) critical = None high = None for trip_point in trip_points: path = os.path.join(base, trip_point + "_type") trip_type = cat(path, fallback='').strip() if trip_type == 'critical': critical = bcat(os.path.join(base, trip_point + "_temp"), fallback=None) elif trip_type == 'high': high = bcat(os.path.join(base, trip_point + "_temp"), fallback=None) if high is not None: try: high = float(high) / 1000.0 except ValueError: high = None if critical is not None: try: critical = float(critical) / 1000.0 except ValueError: critical = None ret[unit_name].append(('', current, high, critical)) return dict(ret)
Return hardware (CPU and others) temperatures as a dict including hardware name, label, current, max and critical temperatures. Implementation notes: - /sys/class/hwmon looks like the most recent interface to retrieve this info, and this implementation relies on it only (old distros will probably use something else) - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon - /sys/class/thermal/thermal_zone* is another one but it's more difficult to parse
65
326
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def sensors_temperatures(): ret = collections.defaultdict(list) basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*') # CentOS has an intermediate /device directory: # https://github.com/giampaolo/psutil/issues/971 # https://github.com/nicolargo/glances/issues/1060 basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*')) basenames = sorted(set([x.split('_')[0] for x in basenames])) # Only add the coretemp hwmon entries if they're not already in # /sys/class/hwmon/ # https://github.com/giampaolo/psutil/issues/1708 # https://github.com/giampaolo/psutil/pull/1648 basenames2 = glob.glob( '/sys/devices/platform/coretemp.*/hwmon/hwmon*/temp*_*') repl = re.compile('/sys/devices/platform/coretemp.*/hwmon/') for name in basenames2: altname = repl.sub('/sys/class/hwmon/', name) if altname not in basenames: basenames.append(name) for base in basenames: try: path = base + '_input' current = float(bcat(path)) / 1000.0 path = os.path.join(os.path.dirname(base), 'name') unit_name = cat(path).strip() except (IOError, OSError, ValueError): # A lot of things can go wrong here, so let's just skip the # whole entry. Sure thing is Linux's /sys/class/hwmon really # is a stinky broken mess. # https://github.com/giampaolo/psutil/issues/1009 # https://github.com/giampaolo/psutil/issues/1101 # https://github.com/giampaolo/psutil/issues/1129 # https://github.com/giampaolo/psutil/issues/1245 # https://github.com/giampaolo/psutil/issues/1323 continue high = bcat(base + '_max', fallback=None) critical = bcat(base + '_crit', fallback=None) label = cat(base + '_label', fallback='').strip() if high is not None: try: high = float(high) / 1000.0 except ValueError: high = None if critical is not None: try: critical = float(critical) / 1000.0 except ValueError: critical = None ret[unit_name].append((label, current, high, critical)) # Indication that no sensors were detected in /sys/class/hwmon/ if not basenames: basenames = glob.glob('/sys/class/thermal/thermal_zone*') basenames = sorted(set(basenames)) for base in basenames: try: path = os.path.join(base, 'temp') current = float(bcat(path)) / 1000.0 path = os.path.join(base, 'type') unit_name = cat(path).strip() except (IOError, OSError, ValueError) as err: debug(err) continue trip_paths = glob.glob(base + '/trip_point*') trip_points = set(['_'.join( os.path.basename(p).split('_')[0:3]) for p in trip_paths]) critical = None high = None for trip_point in trip_points: path = os.path.join(base, trip_point + "_type") trip_type = cat(path, fallback='').strip() if trip_type == 'critical': critical = bcat(os.path.join(base, trip_point + "_temp"), fallback=None) elif trip_type == 'high': high = bcat(os.path.join(base, trip_point + "_temp"), fallback=None) if high is not None: try: high = float(high) / 1000.0 except ValueError: high = None if critical is not None: try: critical = float(critical) / 1000.0 except ValueError: critical = None ret[unit_name].append(('', current, high, critical)) return dict(ret) ``` ###Assistant : Return hardware (CPU and others) temperatures as a dict including hardware name, label, current, max and critical temperatures. Implementation notes: - /sys/class/hwmon looks like the most recent interface to retrieve this info, and this implementation relies on it only (old distros will probably use something else) - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon - /sys/class/thermal/thermal_zone* is another one but it's more difficult to parse
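A usage sketch of the public API this function backs (Linux-only; sensor names and readings depend entirely on the machine):

```Python
import psutil

temps = psutil.sensors_temperatures()
for name, entries in temps.items():
    for entry in entries:
        # Each entry carries (label, current, high, critical) temperatures.
        print(name, entry.label or name, entry.current, entry.high, entry.critical)
```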
2,124
def preview_revision_for_task(request, page_id, task_id): page = get_object_or_404(Page, id=page_id) task = get_object_or_404(Task, id=task_id).specific try: task_state = TaskState.objects.get( page_revision__page=page, task=task, status=TaskState.STATUS_IN_PROGRESS ) except TaskState.DoesNotExist: messages.error( request, _( "The page '{0}' is not currently awaiting moderation in task '{1}'." ).format(page.get_admin_display_title(), task.name), ) return redirect("wagtailadmin_home") revision = task_state.page_revision if not task.get_actions(page, request.user): raise PermissionDenied page_to_view = revision.as_page_object() # TODO: provide workflow actions within this view return page_to_view.make_preview_request( request, page.default_preview_mode, extra_request_attrs={"revision_id": revision.id}, )
Preview the revision linked to the in-progress TaskState of a specified Task. This enables pages in moderation to be edited and new TaskStates linked to the new revisions created, with preview links remaining valid
34
68
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def preview_revision_for_task(request, page_id, task_id): page = get_object_or_404(Page, id=page_id) task = get_object_or_404(Task, id=task_id).specific try: task_state = TaskState.objects.get( page_revision__page=page, task=task, status=TaskState.STATUS_IN_PROGRESS ) except TaskState.DoesNotExist: messages.error( request, _( "The page '{0}' is not currently awaiting moderation in task '{1}'." ).format(page.get_admin_display_title(), task.name), ) return redirect("wagtailadmin_home") revision = task_state.page_revision if not task.get_actions(page, request.user): raise PermissionDenied page_to_view = revision.as_page_object() # TODO: provide workflow actions within this view return page_to_view.make_preview_request( request, page.default_preview_mode, extra_request_attrs={"revision_id": revision.id}, ) ``` ###Assistant : Preview the revision linked to the in-progress TaskState of a specified Task. This enables pages in moderation to be edited and new TaskStates linked to the new revisions created, with preview links remaining valid
2,125
def push_async_callback(self, callback, /, *args, **kwds): _exit_wrapper = self._create_async_cb_wrapper(callback, *args, **kwds) # We changed the signature, so using @wraps is not appropriate, but # setting __wrapped__ may still help with introspection. _exit_wrapper.__wrapped__ = callback self._push_exit_callback(_exit_wrapper, False) return callback # Allow use as a decorator
Registers an arbitrary coroutine function and arguments. Cannot suppress exceptions.
10
44
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def push_async_callback(self, callback, /, *args, **kwds): _exit_wrapper = self._create_async_cb_wrapper(callback, *args, **kwds) # We changed the signature, so using @wraps is not appropriate, but # setting __wrapped__ may still help with introspection. _exit_wrapper.__wrapped__ = callback self._push_exit_callback(_exit_wrapper, False) return callback # Allow use as a decorator ``` ###Assistant : Registers an arbitrary coroutine function and arguments. Cannot suppress exceptions.
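The snippet mirrors `contextlib.AsyncExitStack.push_async_callback` from the standard library; a minimal usage sketch of that API:

```Python
import asyncio
from contextlib import AsyncExitStack

async def cleanup(label):
    print("cleanup:", label)

async def main():
    async with AsyncExitStack() as stack:
        # Registered callbacks run on unwind in LIFO order,
        # whether or not the body raises.
        stack.push_async_callback(cleanup, "first")
        stack.push_async_callback(cleanup, "second")
        print("doing work")

asyncio.run(main())  # doing work, cleanup: second, cleanup: first
```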
2,126
def scanString(self, instring, maxMatches=_MAX_INT, overlap=False): if not self.streamlined: self.streamline() for e in self.ignoreExprs: e.streamline() if not self.keepTabs: instring = _ustr(instring).expandtabs() instrlen = len(instring) loc = 0 preparseFn = self.preParse parseFn = self._parse ParserElement.resetCache() matches = 0 try: while loc <= instrlen and matches < maxMatches: try: preloc = preparseFn(instring, loc) nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) except ParseException: loc = preloc + 1 else: if nextLoc > loc: matches += 1 yield tokens, preloc, nextLoc if overlap: nextloc = preparseFn(instring, loc) if nextloc > loc: loc = nextLoc else: loc += 1 else: loc = nextLoc else: loc = preloc + 1 except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise else: # catch and re-raise exception from here, clearing out pyparsing internal stack trace if getattr(exc, '__traceback__', None) is not None: exc.__traceback__ = self._trim_traceback(exc.__traceback__) raise exc
Scan the input string for expression matches. Each match will return the matching tokens, start location, and end location. May be called with optional ``maxMatches`` argument, to clip scanning after 'n' matches are found. If ``overlap`` is specified, then overlapping matches will be reported. Note that the start and end locations are reported relative to the string being parsed. See :class:`parseString` for more information on parsing strings with embedded tabs. Example:: source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" print(source) for tokens, start, end in Word(alphas).scanString(source): print(' '*start + '^'*(end-start)) print(' '*start + tokens[0]) prints:: sldjf123lsdjjkf345sldkjf879lkjsfd987 ^^^^^ sldjf ^^^^^^^ lsdjjkf ^^^^^^ sldkjf ^^^^^^ lkjsfd
99
135
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def scanString(self, instring, maxMatches=_MAX_INT, overlap=False): if not self.streamlined: self.streamline() for e in self.ignoreExprs: e.streamline() if not self.keepTabs: instring = _ustr(instring).expandtabs() instrlen = len(instring) loc = 0 preparseFn = self.preParse parseFn = self._parse ParserElement.resetCache() matches = 0 try: while loc <= instrlen and matches < maxMatches: try: preloc = preparseFn(instring, loc) nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) except ParseException: loc = preloc + 1 else: if nextLoc > loc: matches += 1 yield tokens, preloc, nextLoc if overlap: nextloc = preparseFn(instring, loc) if nextloc > loc: loc = nextLoc else: loc += 1 else: loc = nextLoc else: loc = preloc + 1 except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise else: # catch and re-raise exception from here, clearing out pyparsing internal stack trace if getattr(exc, '__traceback__', None) is not None: exc.__traceback__ = self._trim_traceback(exc.__traceback__) raise exc ``` ###Assistant : Scan the input string for expression matches. Each match will return the matching tokens, start location, and end location. May be called with optional ``maxMatches`` argument, to clip scanning after 'n' matches are found. If ``overlap`` is specified, then overlapping matches will be reported. Note that the start and end locations are reported relative to the string being parsed. See :class:`parseString` for more information on parsing strings with embedded tabs. Example:: source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" print(source) for tokens, start, end in Word(alphas).scanString(source): print(' '*start + '^'*(end-start)) print(' '*start + tokens[0]) prints:: sldjf123lsdjjkf345sldkjf879lkjsfd987 ^^^^^ sldjf ^^^^^^^ lsdjjkf ^^^^^^ sldkjf ^^^^^^ lkjsfd
2,127
def add_to_apply_calls(self, func, *args, length=None, width=None, **kwargs): return PandasOnDaskDataframePartition( self._data, call_queue=self.call_queue + [[func, args, kwargs]], length=length, width=width, )
Add a function to the call queue. Parameters ---------- func : callable Function to be added to the call queue. *args : iterable Additional positional arguments to be passed in `func`. length : distributed.Future or int, optional Length, or reference to length, of wrapped ``pandas.DataFrame``. width : distributed.Future or int, optional Width, or reference to width, of wrapped ``pandas.DataFrame``. **kwargs : dict Additional keyword arguments to be passed in `func`. Returns ------- PandasOnDaskDataframePartition A new ``PandasOnDaskDataframePartition`` object. Notes ----- The keyword arguments are sent as a dictionary.
87
18
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def add_to_apply_calls(self, func, *args, length=None, width=None, **kwargs): return PandasOnDaskDataframePartition( self._data, call_queue=self.call_queue + [[func, args, kwargs]], length=length, width=width, ) ``` ###Assistant : Add a function to the call queue. Parameters ---------- func : callable Function to be added to the call queue. *args : iterable Additional positional arguments to be passed in `func`. length : distributed.Future or int, optional Length, or reference to length, of wrapped ``pandas.DataFrame``. width : distributed.Future or int, optional Width, or reference to width, of wrapped ``pandas.DataFrame``. **kwargs : dict Additional keyword arguments to be passed in `func`. Returns ------- PandasOnDaskDataframePartition A new ``PandasOnDaskDataframePartition`` object. Notes ----- The keyword arguments are sent as a dictionary.
2,128
def to_json_string(self) -> str: dictionary = self.to_dict() for key, value in dictionary.items(): if isinstance(value, np.ndarray): dictionary[key] = value.tolist() # make sure private name "_processor_class" is correctly # saved as "processor_class" _processor_class = dictionary.pop("_processor_class", None) if _processor_class is not None: dictionary["processor_class"] = _processor_class return json.dumps(dictionary, indent=2, sort_keys=True) + "\n"
Serializes this instance to a JSON string. Returns: `str`: String containing all the attributes that make up this feature_extractor instance in JSON format.
23
48
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def to_json_string(self) -> str: dictionary = self.to_dict() for key, value in dictionary.items(): if isinstance(value, np.ndarray): dictionary[key] = value.tolist() # make sure private name "_processor_class" is correctly # saved as "processor_class" _processor_class = dictionary.pop("_processor_class", None) if _processor_class is not None: dictionary["processor_class"] = _processor_class return json.dumps(dictionary, indent=2, sort_keys=True) + "\n" ``` ###Assistant : Serializes this instance to a JSON string. Returns: `str`: String containing all the attributes that make up this feature_extractor instance in JSON format.
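The ndarray-to-list conversion is the load-bearing step here; the same pattern in isolation (the standalone function and sample keys are hypothetical):

```Python
import json
import numpy as np

def to_json_string(attrs):
    # json cannot serialize numpy arrays, so convert them to plain lists.
    clean = {k: (v.tolist() if isinstance(v, np.ndarray) else v)
             for k, v in attrs.items()}
    return json.dumps(clean, indent=2, sort_keys=True) + "\n"

print(to_json_string({"image_mean": np.array([0.5, 0.5, 0.5]), "size": 224}))
```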
2,129
def one_of_permissions_or_auth_filter_required(context, permissions): if not permissions: return True authorization_filters = [ p for p in permissions if isinstance(p, AuthorizationFilters) ] permissions = [p for p in permissions if not isinstance(p, AuthorizationFilters)] granted_by_permissions = False granted_by_authorization_filters = False # TODO: move this function from graphql to core from saleor.graphql.utils import get_user_or_app_from_context is_app = bool(getattr(context, "app", None)) requestor = get_user_or_app_from_context(context) if permissions: perm_checks_results = [] for permission in permissions: if is_app and permission == AccountPermissions.MANAGE_STAFF: # `MANAGE_STAFF` permission for apps is not supported, as apps using it # could create a staff user with full access. perm_checks_results.append(False) else: perm_checks_results.append(requestor.has_perm(permission)) granted_by_permissions = any(perm_checks_results) if authorization_filters: auth_filters_results = [] for p in authorization_filters: perm_fn = resolve_authorization_filter_fn(p) if perm_fn: res = perm_fn(context) auth_filters_results.append(bool(res)) granted_by_authorization_filters = any(auth_filters_results) return granted_by_permissions or granted_by_authorization_filters
Determine whether user or app has rights to perform an action. The `context` parameter is the Context instance associated with the request.
22
125
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def one_of_permissions_or_auth_filter_required(context, permissions): if not permissions: return True authorization_filters = [ p for p in permissions if isinstance(p, AuthorizationFilters) ] permissions = [p for p in permissions if not isinstance(p, AuthorizationFilters)] granted_by_permissions = False granted_by_authorization_filters = False # TODO: move this function from graphql to core from saleor.graphql.utils import get_user_or_app_from_context is_app = bool(getattr(context, "app", None)) requestor = get_user_or_app_from_context(context) if permissions: perm_checks_results = [] for permission in permissions: if is_app and permission == AccountPermissions.MANAGE_STAFF: # `MANAGE_STAFF` permission for apps is not supported, as apps using it # could create a staff user with full access. perm_checks_results.append(False) else: perm_checks_results.append(requestor.has_perm(permission)) granted_by_permissions = any(perm_checks_results) if authorization_filters: auth_filters_results = [] for p in authorization_filters: perm_fn = resolve_authorization_filter_fn(p) if perm_fn: res = perm_fn(context) auth_filters_results.append(bool(res)) granted_by_authorization_filters = any(auth_filters_results) return granted_by_permissions or granted_by_authorization_filters ``` ###Assistant : Determine whether user or app has rights to perform an action. The `context` parameter is the Context instance associated with the request.
2,130
def _dirmatch(path, matchwith): matchlen = len(matchwith) if (path.startswith(matchwith) and path[matchlen:matchlen + 1] in [os.sep, '']): return True return False
Check if path is within matchwith's tree. >>> _dirmatch('/home/foo/bar', '/home/foo/bar') True >>> _dirmatch('/home/foo/bar/', '/home/foo/bar') True >>> _dirmatch('/home/foo/bar/etc', '/home/foo/bar') True >>> _dirmatch('/home/foo/bar2', '/home/foo/bar') False >>> _dirmatch('/home/foo/bar2/etc', '/home/foo/bar') False
27
19
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _dirmatch(path, matchwith): matchlen = len(matchwith) if (path.startswith(matchwith) and path[matchlen:matchlen + 1] in [os.sep, '']): return True return False ``` ###Assistant : Check if path is within matchwith's tree. >>> _dirmatch('/home/foo/bar', '/home/foo/bar') True >>> _dirmatch('/home/foo/bar/', '/home/foo/bar') True >>> _dirmatch('/home/foo/bar/etc', '/home/foo/bar') True >>> _dirmatch('/home/foo/bar2', '/home/foo/bar') False >>> _dirmatch('/home/foo/bar2/etc', '/home/foo/bar') False
2,131
def deform_sampling(self, feat, offset): # it is an equivalent implementation of bilinear interpolation b, c, h, w = feat.shape weight = feat.new_ones(c, 1, 1, 1) y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c) return y
Sample the feature `feat` according to `offset`. Args: feat (Tensor): Feature offset (Tensor): Spatial offset for feature sampling
18
37
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def deform_sampling(self, feat, offset): # it is an equivalent implementation of bilinear interpolation b, c, h, w = feat.shape weight = feat.new_ones(c, 1, 1, 1) y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c) return y ``` ###Assistant : Sample the feature `feat` according to `offset`. Args: feat (Tensor): Feature offset (Tensor): Spatial offset for feature sampling
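The record's comment notes this is equivalent to bilinear interpolation: a 1x1 depthwise kernel of ones makes the deformable convolution simply sample the input at the offset positions. The record appears to use an mmcv-style `deform_conv2d` signature; the sketch below assumes `torchvision.ops.deform_conv2d` instead (so the argument layout differs) and verifies that a zero offset reproduces the input:

```Python
import torch
from torchvision.ops import deform_conv2d

b, c, h, w = 1, 4, 5, 5
feat = torch.randn(b, c, h, w)
# 1x1 depthwise kernel of ones: groups are inferred from the weight shape
weight = feat.new_ones(c, 1, 1, 1)
# zero offset of shape (b, 2*kh*kw, h, w): sample each pixel at its own location
offset = torch.zeros(b, 2, h, w)
out = deform_conv2d(feat, offset, weight)
assert torch.allclose(out, feat)  # zero offset == plain bilinear identity
```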
2,132
def page_identity(self, response, request_json=None): request_path = response.request.path_url if request_path == '/migrations_notran/': raise exc.IsMigrating('You have been redirected to the migration-in-progress page.') request_method = response.request.method.lower() self.last_elapsed = response.elapsed if isinstance(request_json, dict) and 'ds' in request_json: ds = request_json.ds else: ds = None data = self.extract_data(response) exc_str = "%s (%s) received" % (http.responses[response.status_code], response.status_code) exception = exception_from_status_code(response.status_code) if exception: raise exception(exc_str, data) if response.status_code in (http.OK, http.CREATED, http.ACCEPTED): # Not all JSON responses include a URL. Grab it from the request # object, if needed. if 'url' in data: endpoint = data['url'] else: endpoint = request_path data = objectify_response_json(response) if request_method in ('get', 'patch', 'put'): # Update existing resource and return it if are_same_endpoint(self.endpoint, request_path): self.json = data self.r = response return self registered_type = get_registered_page(request_path, request_method) return registered_type(self.connection, endpoint=endpoint, json=data, last_elapsed=response.elapsed, r=response, ds=ds) elif response.status_code == http.FORBIDDEN: if is_license_invalid(response): raise exc.LicenseInvalid(exc_str, data) elif is_license_exceeded(response): raise exc.LicenseExceeded(exc_str, data) else: raise exc.Forbidden(exc_str, data) elif response.status_code == http.BAD_REQUEST: if is_license_invalid(response): raise exc.LicenseInvalid(exc_str, data) if is_duplicate_error(response): raise exc.Duplicate(exc_str, data) else: raise exc.BadRequest(exc_str, data) else: raise exc.Unknown(exc_str, data)
Takes a `requests.Response` and returns a new __item_class__ instance if the request method is not a get, or returns a __class__ instance if the request path is different than the caller's `endpoint`.
32
171
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def page_identity(self, response, request_json=None): request_path = response.request.path_url if request_path == '/migrations_notran/': raise exc.IsMigrating('You have been redirected to the migration-in-progress page.') request_method = response.request.method.lower() self.last_elapsed = response.elapsed if isinstance(request_json, dict) and 'ds' in request_json: ds = request_json.ds else: ds = None data = self.extract_data(response) exc_str = "%s (%s) received" % (http.responses[response.status_code], response.status_code) exception = exception_from_status_code(response.status_code) if exception: raise exception(exc_str, data) if response.status_code in (http.OK, http.CREATED, http.ACCEPTED): # Not all JSON responses include a URL. Grab it from the request # object, if needed. if 'url' in data: endpoint = data['url'] else: endpoint = request_path data = objectify_response_json(response) if request_method in ('get', 'patch', 'put'): # Update existing resource and return it if are_same_endpoint(self.endpoint, request_path): self.json = data self.r = response return self registered_type = get_registered_page(request_path, request_method) return registered_type(self.connection, endpoint=endpoint, json=data, last_elapsed=response.elapsed, r=response, ds=ds) elif response.status_code == http.FORBIDDEN: if is_license_invalid(response): raise exc.LicenseInvalid(exc_str, data) elif is_license_exceeded(response): raise exc.LicenseExceeded(exc_str, data) else: raise exc.Forbidden(exc_str, data) elif response.status_code == http.BAD_REQUEST: if is_license_invalid(response): raise exc.LicenseInvalid(exc_str, data) if is_duplicate_error(response): raise exc.Duplicate(exc_str, data) else: raise exc.BadRequest(exc_str, data) else: raise exc.Unknown(exc_str, data) ``` ###Assistant : Takes a `requests.Response` and returns a new __item_class__ instance if the request method is not a get, or returns a __class__ instance if the request path is different than the caller's `endpoint`.
2,133
def _installed_conda(self): if not self._is_conda: return None with Popen("conda list", shell=True, stdout=PIPE, stderr=PIPE) as conda: stdout, stderr = conda.communicate() if stderr: return "Could not get package list" installed = stdout.decode(self._encoding, errors="replace").splitlines() return "\n".join(installed)
str: The list of installed Conda packages within Faceswap's scope.
10
33
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _installed_conda(self): if not self._is_conda: return None with Popen("conda list", shell=True, stdout=PIPE, stderr=PIPE) as conda: stdout, stderr = conda.communicate() if stderr: return "Could not get package list" installed = stdout.decode(self._encoding, errors="replace").splitlines() return "\n".join(installed) ``` ###Assistant : str: The list of installed Conda packages within Faceswap's scope.
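The capture-and-decode subprocess pattern in the record generalizes to any package-listing CLI. A sketch using `pip list` in place of `conda list` (an assumption, chosen because pip is more widely available); the decode-with-replacement mirrors the record:

```Python
import sys
from subprocess import PIPE, Popen

def installed_packages(encoding: str = "utf-8") -> str:
    """Return 'pip list' output, or an error string if stderr was non-empty."""
    cmd = f"{sys.executable} -m pip list"
    with Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) as proc:
        stdout, stderr = proc.communicate()
    if stderr:
        # note: pip may emit harmless warnings on stderr; a real tool
        # might check the return code instead of any stderr output
        return "Could not get package list"
    lines = stdout.decode(encoding, errors="replace").splitlines()
    return "\n".join(lines)

print(installed_packages()[:200])
```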
2,134
def embedding(self, input_seq): seq_embeddings = self.item_embedding_layer(input_seq) seq_embeddings = seq_embeddings * (self.embedding_dim ** 0.5) # FIXME positional_seq = tf.expand_dims(tf.range(tf.shape(input_seq)[1]), 0) positional_seq = tf.tile(positional_seq, [tf.shape(input_seq)[0], 1]) positional_embeddings = self.positional_embedding_layer(positional_seq) return seq_embeddings, positional_embeddings
Compute the sequence and positional embeddings. Args: input_seq (tf.Tensor): Input sequence Returns: tf.Tensor, tf.Tensor: - Sequence embeddings. - Positional embeddings.
20
30
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def embedding(self, input_seq): seq_embeddings = self.item_embedding_layer(input_seq) seq_embeddings = seq_embeddings * (self.embedding_dim ** 0.5) # FIXME positional_seq = tf.expand_dims(tf.range(tf.shape(input_seq)[1]), 0) positional_seq = tf.tile(positional_seq, [tf.shape(input_seq)[0], 1]) positional_embeddings = self.positional_embedding_layer(positional_seq) return seq_embeddings, positional_embeddings ``` ###Assistant : Compute the sequence and positional embeddings. Args: input_seq (tf.Tensor): Input sequence Returns: tf.Tensor, tf.Tensor: - Sequence embeddings. - Positional embeddings.
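The positional-index construction above (a range over sequence length, tiled across the batch) can be checked in isolation. A small sketch with concrete shapes, assuming TensorFlow 2 eager mode:

```Python
import tensorflow as tf

input_seq = tf.constant([[3, 7, 7, 0], [1, 2, 0, 0]])  # (batch=2, seq_len=4)
positional_seq = tf.expand_dims(tf.range(tf.shape(input_seq)[1]), 0)   # (1, 4)
positional_seq = tf.tile(positional_seq, [tf.shape(input_seq)[0], 1])  # (2, 4)
print(positional_seq.numpy())
# [[0 1 2 3]
#  [0 1 2 3]]
# each row indexes one embedding-table row per position, independent of token ids
```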
2,135
def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False, newline=True): nocolor = msg if not log_only: has_newline = msg.endswith(u'\n') if has_newline: msg2 = msg[:-1] else: msg2 = msg if color: msg2 = stringc(msg2, color) if has_newline or newline: msg2 = msg2 + u'\n' msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr)) # Convert back to text string # We first convert to a byte string so that we get rid of # characters that are invalid in the user's locale msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace') # Note: After Display() class is refactored need to update the log capture # code in 'bin/ansible-connection' (and other relevant places). if not stderr: fileobj = sys.stdout else: fileobj = sys.stderr fileobj.write(msg2) try: fileobj.flush() except IOError as e: # Ignore EPIPE in case fileobj has been prematurely closed, eg. # when piping to "head -n1" if e.errno != errno.EPIPE: raise if logger and not screen_only: # We first convert to a byte string so that we get rid of # color and characters that are invalid in the user's locale msg2 = to_bytes(nocolor.lstrip(u'\n')) # Convert back to text string msg2 = to_text(msg2, self._output_encoding(stderr=stderr)) lvl = logging.INFO if color: # set logger level based on color (not great) try: lvl = color_to_log_level[color] except KeyError: # this should not happen, but JIC raise AnsibleAssertionError('Invalid color supplied to display: %s' % color) # actually log logger.log(lvl, msg2)
Display a message to the user Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.
17
223
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False, newline=True): nocolor = msg if not log_only: has_newline = msg.endswith(u'\n') if has_newline: msg2 = msg[:-1] else: msg2 = msg if color: msg2 = stringc(msg2, color) if has_newline or newline: msg2 = msg2 + u'\n' msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr)) # Convert back to text string # We first convert to a byte string so that we get rid of # characters that are invalid in the user's locale msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace') # Note: After Display() class is refactored need to update the log capture # code in 'bin/ansible-connection' (and other relevant places). if not stderr: fileobj = sys.stdout else: fileobj = sys.stderr fileobj.write(msg2) try: fileobj.flush() except IOError as e: # Ignore EPIPE in case fileobj has been prematurely closed, eg. # when piping to "head -n1" if e.errno != errno.EPIPE: raise if logger and not screen_only: # We first convert to a byte string so that we get rid of # color and characters that are invalid in the user's locale msg2 = to_bytes(nocolor.lstrip(u'\n')) # Convert back to text string msg2 = to_text(msg2, self._output_encoding(stderr=stderr)) lvl = logging.INFO if color: # set logger level based on color (not great) try: lvl = color_to_log_level[color] except KeyError: # this should not happen, but JIC raise AnsibleAssertionError('Invalid color supplied to display: %s' % color) # actually log logger.log(lvl, msg2) ``` ###Assistant : Display a message to the user Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.
2,136
def get_queryset(self, request): queryset = SavedFilter.objects.all() user = request.user if user.is_superuser: return queryset if user.is_anonymous: return queryset.filter(shared=True) return queryset.filter( Q(shared=True) | Q(user=user) )
Return only shared SavedFilters, or those owned by the current user, unless this is a superuser.
16
23
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_queryset(self, request): queryset = SavedFilter.objects.all() user = request.user if user.is_superuser: return queryset if user.is_anonymous: return queryset.filter(shared=True) return queryset.filter( Q(shared=True) | Q(user=user) ) ``` ###Assistant : Return only shared SavedFilters, or those owned by the current user, unless this is a superuser.
2,137
def list(self, request): report_list = [] report_content_type = ContentType.objects.get(app_label='extras', model='report') results = { r.name: r for r in JobResult.objects.filter( obj_type=report_content_type, status__in=JobResultStatusChoices.TERMINAL_STATE_CHOICES ).order_by('name', '-created').distinct('name').defer('data') } # Iterate through all available Reports. for module_name, reports in get_reports(): for report in reports: # Attach the relevant JobResult (if any) to each Report. report.result = results.get(report.full_name, None) report_list.append(report) serializer = serializers.ReportSerializer(report_list, many=True, context={ 'request': request, }) return Response(serializer.data)
Compile all reports and their related results (if any). Result data is deferred in the list view.
17
64
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def list(self, request): report_list = [] report_content_type = ContentType.objects.get(app_label='extras', model='report') results = { r.name: r for r in JobResult.objects.filter( obj_type=report_content_type, status__in=JobResultStatusChoices.TERMINAL_STATE_CHOICES ).order_by('name', '-created').distinct('name').defer('data') } # Iterate through all available Reports. for module_name, reports in get_reports(): for report in reports: # Attach the relevant JobResult (if any) to each Report. report.result = results.get(report.full_name, None) report_list.append(report) serializer = serializers.ReportSerializer(report_list, many=True, context={ 'request': request, }) return Response(serializer.data) ``` ###Assistant : Compile all reports and their related results (if any). Result data is deferred in the list view.
2,138
def _usable(self, queryset, name, value): user = self.request.user if self.request else None if not user or user.is_anonymous: if value: return queryset.filter(enabled=True, shared=True) return queryset.filter(Q(enabled=False) | Q(shared=False)) if value: return queryset.filter(enabled=True).filter(Q(shared=True) | Q(user=user)) return queryset.filter(Q(enabled=False) | Q(Q(shared=False) & ~Q(user=user)))
Return only SavedFilters that are both enabled and shared (or belong to the current user).
16
38
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _usable(self, queryset, name, value): user = self.request.user if self.request else None if not user or user.is_anonymous: if value: return queryset.filter(enabled=True, shared=True) return queryset.filter(Q(enabled=False) | Q(shared=False)) if value: return queryset.filter(enabled=True).filter(Q(shared=True) | Q(user=user)) return queryset.filter(Q(enabled=False) | Q(Q(shared=False) & ~Q(user=user))) ``` ###Assistant : Return only SavedFilters that are both enabled and shared (or belong to the current user).
2,139
def get_current_enrollment(student, academic_year=None): current_academic_year = academic_year or frappe.defaults.get_defaults().academic_year program_enrollment_list = frappe.db.sql( , (student, current_academic_year), as_dict=1, ) if program_enrollment_list: return program_enrollment_list[0] else: return None
select name as program_enrollment, student_name, program, student_batch_name as student_batch, student_category, academic_term, academic_year from `tabProgram Enrollment` where student = %s and academic_year = %s order by creation
26
23
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_current_enrollment(student, academic_year=None): current_academic_year = academic_year or frappe.defaults.get_defaults().academic_year program_enrollment_list = frappe.db.sql( , (student, current_academic_year), as_dict=1, ) if program_enrollment_list: return program_enrollment_list[0] else: return None ``` ###Assistant : select name as program_enrollment, student_name, program, student_batch_name as student_batch, student_category, academic_term, academic_year from `tabProgram Enrollment` where student = %s and academic_year = %s order by creation
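The record binds `(student, current_academic_year)` as query parameters rather than interpolating them into the SQL string. The same parameterized-query idea, sketched with the standard-library sqlite3 driver (which uses `?` placeholders instead of frappe's `%s`) and a throwaway table:

```Python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE enrollment (student TEXT, academic_year TEXT, program TEXT)")
conn.execute("INSERT INTO enrollment VALUES ('EDU-STU-001', '2023-2024', 'Physics')")

row = conn.execute(
    "SELECT program FROM enrollment WHERE student = ? AND academic_year = ?",
    ("EDU-STU-001", "2023-2024"),  # parameters are bound, never string-interpolated
).fetchone()
print(row)  # ('Physics',)
```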
2,140
def inception_resnet_block(x, scale, block_type, block_idx, activation="relu"): if block_type == "block35": branch_0 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(branch_1, 32, 3) branch_2 = conv2d_bn(x, 32, 1) branch_2 = conv2d_bn(branch_2, 48, 3) branch_2 = conv2d_bn(branch_2, 64, 3) branches = [branch_0, branch_1, branch_2] elif block_type == "block17": branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 128, 1) branch_1 = conv2d_bn(branch_1, 160, [1, 7]) branch_1 = conv2d_bn(branch_1, 192, [7, 1]) branches = [branch_0, branch_1] elif block_type == "block8": branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(branch_1, 224, [1, 3]) branch_1 = conv2d_bn(branch_1, 256, [3, 1]) branches = [branch_0, branch_1] else: raise ValueError( "Unknown Inception-ResNet block type. " 'Expects "block35", "block17" or "block8", ' "but got: " + str(block_type) ) block_name = block_type + "_" + str(block_idx) channel_axis = 1 if backend.image_data_format() == "channels_first" else 3 mixed = layers.Concatenate(axis=channel_axis, name=block_name + "_mixed")( branches ) up = conv2d_bn( mixed, backend.int_shape(x)[channel_axis], 1, activation=None, use_bias=True, name=block_name + "_conv", ) x = CustomScaleLayer()(x, up, scale) if activation is not None: x = layers.Activation(activation, name=block_name + "_ac")(x) return x @keras_export("keras.applications.inception_resnet_v2.preprocess_input")
Adds an Inception-ResNet block. This function builds 3 types of Inception-ResNet blocks mentioned in the paper, controlled by the `block_type` argument (which is the block name used in the official TF-slim implementation): - Inception-ResNet-A: `block_type='block35'` - Inception-ResNet-B: `block_type='block17'` - Inception-ResNet-C: `block_type='block8'` Args: x: input tensor. scale: scaling factor to scale the residuals (i.e., the output of passing `x` through an inception module) before adding them to the shortcut branch. Let `r` be the output from the residual branch, the output of this block will be `x + scale * r`. block_type: `'block35'`, `'block17'` or `'block8'`, determines the network structure in the residual branch. block_idx: an `int` used for generating layer names. The Inception-ResNet blocks are repeated many times in this network. We use `block_idx` to identify each of the repetitions. For example, the first Inception-ResNet-A block will have `block_type='block35', block_idx=0`, and the layer names will have a common prefix `'block35_0'`. activation: activation function to use at the end of the block (see [activations](../activations.md)). When `activation=None`, no activation is applied (i.e., "linear" activation: `a(x) = x`). Returns: Output tensor for the block. Raises: ValueError: if `block_type` is not one of `'block35'`, `'block17'` or `'block8'`.
193
180
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def inception_resnet_block(x, scale, block_type, block_idx, activation="relu"): if block_type == "block35": branch_0 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(branch_1, 32, 3) branch_2 = conv2d_bn(x, 32, 1) branch_2 = conv2d_bn(branch_2, 48, 3) branch_2 = conv2d_bn(branch_2, 64, 3) branches = [branch_0, branch_1, branch_2] elif block_type == "block17": branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 128, 1) branch_1 = conv2d_bn(branch_1, 160, [1, 7]) branch_1 = conv2d_bn(branch_1, 192, [7, 1]) branches = [branch_0, branch_1] elif block_type == "block8": branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(branch_1, 224, [1, 3]) branch_1 = conv2d_bn(branch_1, 256, [3, 1]) branches = [branch_0, branch_1] else: raise ValueError( "Unknown Inception-ResNet block type. " 'Expects "block35", "block17" or "block8", ' "but got: " + str(block_type) ) block_name = block_type + "_" + str(block_idx) channel_axis = 1 if backend.image_data_format() == "channels_first" else 3 mixed = layers.Concatenate(axis=channel_axis, name=block_name + "_mixed")( branches ) up = conv2d_bn( mixed, backend.int_shape(x)[channel_axis], 1, activation=None, use_bias=True, name=block_name + "_conv", ) x = CustomScaleLayer()(x, up, scale) if activation is not None: x = layers.Activation(activation, name=block_name + "_ac")(x) return x @keras_export("keras.applications.inception_resnet_v2.preprocess_input") ``` ###Assistant : Adds an Inception-ResNet block. This function builds 3 types of Inception-ResNet blocks mentioned in the paper, controlled by the `block_type` argument (which is the block name used in the official TF-slim implementation): - Inception-ResNet-A: `block_type='block35'` - Inception-ResNet-B: `block_type='block17'` - Inception-ResNet-C: `block_type='block8'` Args: x: input tensor. scale: scaling factor to scale the residuals (i.e., the output of passing `x` through an inception module) before adding them to the shortcut branch. Let `r` be the output from the residual branch, the output of this block will be `x + scale * r`. block_type: `'block35'`, `'block17'` or `'block8'`, determines the network structure in the residual branch. block_idx: an `int` used for generating layer names. The Inception-ResNet blocks are repeated many times in this network. We use `block_idx` to identify each of the repetitions. For example, the first Inception-ResNet-A block will have `block_type='block35', block_idx=0`, and the layer names will have a common prefix `'block35_0'`. activation: activation function to use at the end of the block (see [activations](../activations.md)). When `activation=None`, no activation is applied (i.e., "linear" activation: `a(x) = x`). Returns: Output tensor for the block. Raises: ValueError: if `block_type` is not one of `'block35'`, `'block17'` or `'block8'`.
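Per the record's docstring, the block's output is `x + scale * r`, with `r` the residual branch projected back to `x`'s channel count. A numpy sketch of just that scaled-residual arithmetic (the shapes and the 0.17 scale are illustrative, not taken from the record):

```Python
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((1, 8, 8, 32))   # shortcut branch
r = rng.standard_normal((1, 8, 8, 32))   # residual branch ("up" in the record)
scale = 0.17                             # illustrative scaling factor

out = x + scale * r                      # the x + scale * r described in the docstring
print(out.shape, np.allclose(out - x, scale * r))  # (1, 8, 8, 32) True
```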
2,141
def _get_calculations(self): for selection in self._selections: if selection == "raw": continue logger.debug("Calculating: %s", selection) method = getattr(self, f"_calc_{selection}") raw_keys = [key for key in self._stats if key.startswith("raw_")] for key in raw_keys: selected_key = f"{selection}_{key.replace('raw_', '')}" self._stats[selected_key] = method(self._stats[key])
Perform the required calculations and populate :attr:`stats`.
7
38
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_calculations(self): for selection in self._selections: if selection == "raw": continue logger.debug("Calculating: %s", selection) method = getattr(self, f"_calc_{selection}") raw_keys = [key for key in self._stats if key.startswith("raw_")] for key in raw_keys: selected_key = f"{selection}_{key.replace('raw_', '')}" self._stats[selected_key] = method(self._stats[key]) ``` ###Assistant : Perform the required calculations and populate :attr:`stats`.
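The record resolves methods by naming convention with `getattr(self, f"_calc_{selection}")`. A stripped-down sketch of that dispatch pattern with a single hypothetical `_calc_avg` method:

```Python
class Stats:
    def __init__(self, raw):
        self._stats = {f"raw_{k}": v for k, v in raw.items()}

    def _calc_avg(self, values):
        return sum(values) / len(values)

    def apply(self, selection):
        method = getattr(self, f"_calc_{selection}")  # resolve by naming convention
        for key in [k for k in self._stats if k.startswith("raw_")]:
            self._stats[f"{selection}_{key.replace('raw_', '')}"] = method(self._stats[key])

s = Stats({"loss": [0.5, 0.3, 0.1]})
s.apply("avg")
print(s._stats["avg_loss"])  # approximately 0.3
```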
2,142
def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"): if solver_type not in ['dpm_solver', 'taylor']: raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) ns = self.noise_schedule dims = x.dim() model_prev_1, model_prev_0 = model_prev_list t_prev_1, t_prev_0 = t_prev_list lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda( t_prev_0), ns.marginal_lambda(t) log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) alpha_t = torch.exp(log_alpha_t) h_0 = lambda_prev_0 - lambda_prev_1 h = lambda_t - lambda_prev_0 r0 = h_0 / h D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) if self.predict_x0: if solver_type == 'dpm_solver': x_t = ( expand_dims(sigma_t / sigma_prev_0, dims) * x - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0 ) elif solver_type == 'taylor': x_t = ( expand_dims(sigma_t / sigma_prev_0, dims) * x - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0 ) else: if solver_type == 'dpm_solver': x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0 ) elif solver_type == 'taylor': x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0 ) return x_t
Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`. Args: x: A pytorch tensor. The initial value at time `t_prev_list[-1]`. model_prev_list: A list of pytorch tensors. The previous computed model values. t_prev_list: A list of pytorch tensors. The previous times, each time has the shape (x.shape[0],) t: A pytorch tensor. The ending time, with the shape (x.shape[0],). solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. The type slightly impacts the performance. We recommend using the 'dpm_solver' type. Returns: x_t: A pytorch tensor. The approximated solution at time `t`.
91
228
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"): if solver_type not in ['dpm_solver', 'taylor']: raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) ns = self.noise_schedule dims = x.dim() model_prev_1, model_prev_0 = model_prev_list t_prev_1, t_prev_0 = t_prev_list lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda( t_prev_0), ns.marginal_lambda(t) log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) alpha_t = torch.exp(log_alpha_t) h_0 = lambda_prev_0 - lambda_prev_1 h = lambda_t - lambda_prev_0 r0 = h_0 / h D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) if self.predict_x0: if solver_type == 'dpm_solver': x_t = ( expand_dims(sigma_t / sigma_prev_0, dims) * x - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0 ) elif solver_type == 'taylor': x_t = ( expand_dims(sigma_t / sigma_prev_0, dims) * x - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0 ) else: if solver_type == 'dpm_solver': x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0 ) elif solver_type == 'taylor': x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0 ) return x_t ``` ###Assistant : Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`. Args: x: A pytorch tensor. The initial value at time `t_prev_list[-1]`. model_prev_list: A list of pytorch tensors. The previous computed model values. t_prev_list: A list of pytorch tensors. The previous times, each time has the shape (x.shape[0],) t: A pytorch tensor. The ending time, with the shape (x.shape[0],). solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. The type slightly impacts the performance. We recommend using the 'dpm_solver' type. Returns: x_t: A pytorch tensor. The approximated solution at time `t`.
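For the `predict_x0` + 'taylor' branch above, the update collapses to three terms once the log-SNR increments are known. A scalar numpy sketch of that formula; all schedule values below are made up, so this shows the arithmetic rather than a working sampler:

```Python
import numpy as np

# made-up schedule values at t_prev_1, t_prev_0, t
lam_prev_1, lam_prev_0, lam_t = -2.0, -1.2, -0.5  # log-SNR lambda(t)
sigma_prev_0, sigma_t = 0.8, 0.6                  # marginal stds
alpha_t = 0.9                                     # marginal mean coefficient
m_prev_1, m_prev_0 = 0.40, 0.55                   # model outputs (x0 predictions)
x = 1.0

h_0 = lam_prev_0 - lam_prev_1
h = lam_t - lam_prev_0
r0 = h_0 / h
D1_0 = (1.0 / r0) * (m_prev_0 - m_prev_1)         # first finite difference

x_t = (
    (sigma_t / sigma_prev_0) * x
    - alpha_t * (np.exp(-h) - 1.0) * m_prev_0
    + alpha_t * ((np.exp(-h) - 1.0) / h + 1.0) * D1_0
)
print(x_t)
```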
2,143
def global_enum(cls, update_str=False): if issubclass(cls, Flag): cls.__repr__ = global_flag_repr else: cls.__repr__ = global_enum_repr if not issubclass(cls, ReprEnum) or update_str: cls.__str__ = global_str sys.modules[cls.__module__].__dict__.update(cls.__members__) return cls
decorator that makes the repr() of an enum member reference its module instead of its class; also exports all members to the enum's module's global namespace
26
25
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def global_enum(cls, update_str=False): if issubclass(cls, Flag): cls.__repr__ = global_flag_repr else: cls.__repr__ = global_enum_repr if not issubclass(cls, ReprEnum) or update_str: cls.__str__ = global_str sys.modules[cls.__module__].__dict__.update(cls.__members__) return cls ``` ###Assistant : decorator that makes the repr() of an enum member reference its module instead of its class; also exports all members to the enum's module's global namespace
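The record is CPython's own implementation; on Python 3.11+ the decorator can be imported directly from `enum`. A usage sketch (assuming Python 3.11+) showing the module-qualified repr and the exported member names:

```Python
# Requires Python 3.11+, where enum.global_enum is available
from enum import IntEnum, global_enum

@global_enum
class Signal(IntEnum):
    HUP = 1
    INT = 2

print(repr(INT))         # -> "__main__.INT": members were exported into module globals
print(repr(Signal.HUP))  # -> "__main__.HUP": repr references the module, not the class
print(Signal.HUP + 0)    # IntEnum members still behave as ints -> 1
```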
2,144
def create_training_target(self, target, run_eagerly=False): if self.has_training_target(): raise ValueError( "The training_target field for the _TrainingEndpoint " "instance has already been populated" ) if run_eagerly: # When run_eagerly, the target tensor is ignored, and the None placeholder # is created instead. self.training_target = _TrainingTarget( None, feedable=True, skip_target_weights=False ) return if self.should_skip_target(): self.training_target = _TrainingTarget(None) else: if target is not None and not backend.is_placeholder(target): feedable = False skip_target_weights = True else: feedable = True skip_target_weights = False if target is None: target_dtype = losses.LABEL_DTYPES_FOR_LOSSES.get( self.loss_fn, backend.dtype(self.output) ) target = backend.placeholder( ndim=len(self.shape), name=self.output_name + "_target", sparse=backend.is_sparse(self.output), dtype=target_dtype, ) self.training_target = _TrainingTarget( target, feedable=feedable, skip_target_weights=skip_target_weights, )
Create training_target instance and update the self.training_target. Note that the input target should just be a tensor or None, and corresponding training target will be created based on the output and loss_fn. Args: target: the target tensor for the current output. Could be None. run_eagerly: boolean, whether the model is in run_eagerly mode. Raises: ValueError if the training_target field for the current instance has already been populated.
67
101
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def create_training_target(self, target, run_eagerly=False): if self.has_training_target(): raise ValueError( "The training_target field for the _TrainingEndpoint " "instance has already been populated" ) if run_eagerly: # When run_eagerly, the target tensor is ignored, and the None placeholder # is created instead. self.training_target = _TrainingTarget( None, feedable=True, skip_target_weights=False ) return if self.should_skip_target(): self.training_target = _TrainingTarget(None) else: if target is not None and not backend.is_placeholder(target): feedable = False skip_target_weights = True else: feedable = True skip_target_weights = False if target is None: target_dtype = losses.LABEL_DTYPES_FOR_LOSSES.get( self.loss_fn, backend.dtype(self.output) ) target = backend.placeholder( ndim=len(self.shape), name=self.output_name + "_target", sparse=backend.is_sparse(self.output), dtype=target_dtype, ) self.training_target = _TrainingTarget( target, feedable=feedable, skip_target_weights=skip_target_weights, ) ``` ###Assistant : Create training_target instance and update the self.training_target. Note that the input target should just be a tensor or None, and corresponding training target will be created based on the output and loss_fn. Args: target: the target tensor for the current output. Could be None. run_eagerly: boolean, whether the model is in run_eagerly mode. Raises: ValueError if the training_target field for the current instance has already been populated.
2,145
def test_set_task_instance_state(run_id, execution_date, session, dag_maker): start_date = datetime_tz(2020, 1, 1) with dag_maker("test_set_task_instance_state", start_date=start_date, session=session) as dag: task_1 = DummyOperator(task_id="task_1") task_2 = DummyOperator(task_id="task_2") task_3 = DummyOperator(task_id="task_3") task_4 = DummyOperator(task_id="task_4") task_5 = DummyOperator(task_id="task_5") task_1 >> [task_2, task_3, task_4, task_5] dagrun = dag_maker.create_dagrun( run_id=run_id, execution_date=execution_date, state=State.FAILED, run_type=DagRunType.SCHEDULED, )
Test that set_task_instance_state updates the TaskInstance state and clears failed downstream task instances
11
45
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_set_task_instance_state(run_id, execution_date, session, dag_maker): start_date = datetime_tz(2020, 1, 1) with dag_maker("test_set_task_instance_state", start_date=start_date, session=session) as dag: task_1 = DummyOperator(task_id="task_1") task_2 = DummyOperator(task_id="task_2") task_3 = DummyOperator(task_id="task_3") task_4 = DummyOperator(task_id="task_4") task_5 = DummyOperator(task_id="task_5") task_1 >> [task_2, task_3, task_4, task_5] dagrun = dag_maker.create_dagrun( run_id=run_id, execution_date=execution_date, state=State.FAILED, run_type=DagRunType.SCHEDULED, ) ``` ###Assistant : Test that set_task_instance_state updates the TaskInstance state and clears failed downstream task instances
2,146
async def async_step_link(self, user_input=None): errors = {} if user_input is not None: # Do not authenticate if the host is already configured self._async_abort_entries_match({CONF_HOST: self._host}) try: info = await authenticate( self.hass, self._host, self._port, self._servers ) except InvalidAuth: errors["base"] = "invalid_auth" except Exception: # pylint: disable=broad-except _LOGGER.exception("Unexpected exception") errors["base"] = "unknown" else: return self.async_create_entry(title=DEFAULT_NAME, data=info) return self.async_show_form(step_id="link", errors=errors)
Handle linking and authenticating with the Roon server.
8
56
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_step_link(self, user_input=None): errors = {} if user_input is not None: # Do not authenticate if the host is already configured self._async_abort_entries_match({CONF_HOST: self._host}) try: info = await authenticate( self.hass, self._host, self._port, self._servers ) except InvalidAuth: errors["base"] = "invalid_auth" except Exception: # pylint: disable=broad-except _LOGGER.exception("Unexpected exception") errors["base"] = "unknown" else: return self.async_create_entry(title=DEFAULT_NAME, data=info) return self.async_show_form(step_id="link", errors=errors) ``` ###Assistant : Handle linking and authenticating with the Roon server.
2,147
def test_get_sid_string_none(): sid_obj = salt.utils.win_dacl.get_sid(None) assert isinstance(sid_obj, pywintypes.SIDType) assert salt.utils.win_dacl.get_sid_string(sid_obj) == "S-1-0-0"
Validate getting a null sid (S-1-0-0) when a null sid is passed
12
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_sid_string_none(): sid_obj = salt.utils.win_dacl.get_sid(None) assert isinstance(sid_obj, pywintypes.SIDType) assert salt.utils.win_dacl.get_sid_string(sid_obj) == "S-1-0-0" ``` ###Assistant : Validate getting a null sid (S-1-0-0) when a null sid is passed
2,148
def read(self, filename, split='train'): label_list = self.get_labels() vocab_info = self.get_vocab()
Returns a dataset containing all the examples that can be read from the file path. If `self.lazy` is False, this eagerly reads all instances from `self._read()` and returns a `MapDataset`. If `self.lazy` is True, this returns an `IterDataset`, which internally relies on the generator created from `self._read()` to lazily produce examples. In this case your implementation of `_read()` must also be lazy (that is, not load all examples into memory at once). Args: filename (str): Path of data file to read, usually provided by `_get_data` function. split (str, optional): The split name of the selected dataset. This only makes a difference when data files of different splits have different structures. Returns: A `MapDataset|IterDataset`.
112
10
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def read(self, filename, split='train'): label_list = self.get_labels() vocab_info = self.get_vocab() ``` ###Assistant : Returns a dataset containing all the examples that can be read from the file path. If `self.lazy` is False, this eagerly reads all instances from `self._read()` and returns a `MapDataset`. If `self.lazy` is True, this returns an `IterDataset`, which internally relies on the generator created from `self._read()` to lazily produce examples. In this case your implementation of `_read()` must also be lazy (that is, not load all examples into memory at once). Args: filename (str): Path of data file to read, usually provided by `_get_data` function. split (str, optional): The split name of the selected dataset. This only makes a difference when data files of different splits have different structures. Returns: A `MapDataset|IterDataset`.
2,149
def test_no_rule_for_attr(self): tag = self.soup.b fn = attribute_rule({"snowman": "barbecue"}) fn(tag) self.assertEqual(str(tag), "<b>baz</b>")
Test that attribute_rule() drops attributes for which no rule has been defined.
12
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_no_rule_for_attr(self): tag = self.soup.b fn = attribute_rule({"snowman": "barbecue"}) fn(tag) self.assertEqual(str(tag), "<b>baz</b>") ``` ###Assistant : Test that attribute_rule() drops attributes for which no rule has been defined.
2,150
def _inspect_generic_serialization(base_obj, depth, parent, failure_set): assert not inspect.isfunction(base_obj) functions = inspect.getmembers(base_obj, predicate=inspect.isfunction) found = False with _printer.indent(): for name, obj in functions: serializable, _ = inspect_serializability( obj, name=name, depth=depth - 1, _parent=parent, _failure_set=failure_set, ) found = found or not serializable if found: break with _printer.indent(): members = inspect.getmembers(base_obj) for name, obj in members: if name.startswith("__") and name.endswith("__") or inspect.isbuiltin(obj): continue serializable, _ = inspect_serializability( obj, name=name, depth=depth - 1, _parent=parent, _failure_set=failure_set, ) found = found or not serializable if found: break if not found: _printer.print( f"WARNING: Did not find non-serializable object in {base_obj}. " "This may be an oversight." ) return found @DeveloperAPI
Adds the first-found non-serializable element to the failure_set.
8
103
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _inspect_generic_serialization(base_obj, depth, parent, failure_set): assert not inspect.isfunction(base_obj) functions = inspect.getmembers(base_obj, predicate=inspect.isfunction) found = False with _printer.indent(): for name, obj in functions: serializable, _ = inspect_serializability( obj, name=name, depth=depth - 1, _parent=parent, _failure_set=failure_set, ) found = found or not serializable if found: break with _printer.indent(): members = inspect.getmembers(base_obj) for name, obj in members: if name.startswith("__") and name.endswith("__") or inspect.isbuiltin(obj): continue serializable, _ = inspect_serializability( obj, name=name, depth=depth - 1, _parent=parent, _failure_set=failure_set, ) found = found or not serializable if found: break if not found: _printer.print( f"WARNING: Did not find non-serializable object in {base_obj}. " "This may be an oversight." ) return found @DeveloperAPI ``` ###Assistant : Adds the first-found non-serializable element to the failure_set.
2,151
def is_dihedral(self): r if self._is_dihedral is not None: return self._is_dihedral order = self.order() if order % 2 == 1: self._is_dihedral = False return False if order == 2: self._is_dihedral = True return True if order == 4: # The dihedral group of order 4 is the Klein 4-group. self._is_dihedral = not self.is_cyclic return self._is_dihedral if self.is_abelian: # The only abelian dihedral groups are the ones of orders 2 and 4. self._is_dihedral = False return False # Now we know the group is of even order >= 6, and nonabelian. n = order // 2 # Handle special cases where there are exactly two generators. gens = self.generators if len(gens) == 2: x, y = gens a, b = x.order(), y.order() # Make a >= b if a < b: x, y, a, b = y, x, b, a # Using Theorem 2.1 of [3]: if {a, b} == {2}: self._is_dihedral = True return True # Using Theorem 1.1 of [3]: if (a, b) == (n, 2) and y*x*y == ~x: self._is_dihedral = True return True # Proceed with algorithm of [1] # Find elements of orders 2 and n order_2, order_n = [], [] for p in self.elements: k = p.order() if k == 2: order_2.append(p) elif k == n: order_n.append(p) if len(order_2) != n + 1 - (n % 2): self._is_dihedral = False return False if not order_n: self._is_dihedral = False return False x = order_n[0] # Want an element y of order 2 that is not a power of x # (i.e. that is not the 180-deg rotation, when n is even). y = order_2[0] if n % 2 == 0 and y == x**(n//2): y = order_2[1] self._is_dihedral = (y*x*y == ~x) return self._is_dihedral
Return ``True`` if the group is dihedral. Examples ======== >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.named_groups import SymmetricGroup, CyclicGroup >>> G = PermutationGroup(Permutation(1, 6)(2, 5)(3, 4), Permutation(0, 1, 2, 3, 4, 5, 6)) >>> G.is_dihedral True >>> G = SymmetricGroup(3) >>> G.is_dihedral True >>> G = CyclicGroup(6) >>> G.is_dihedral False References ========== .. [1] https://math.stackexchange.com/a/827273 .. [2] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral.pdf .. [3] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral2.pdf .. [4] https://en.wikipedia.org/wiki/Dihedral_group
70
287
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_dihedral(self): r if self._is_dihedral is not None: return self._is_dihedral order = self.order() if order % 2 == 1: self._is_dihedral = False return False if order == 2: self._is_dihedral = True return True if order == 4: # The dihedral group of order 4 is the Klein 4-group. self._is_dihedral = not self.is_cyclic return self._is_dihedral if self.is_abelian: # The only abelian dihedral groups are the ones of orders 2 and 4. self._is_dihedral = False return False # Now we know the group is of even order >= 6, and nonabelian. n = order // 2 # Handle special cases where there are exactly two generators. gens = self.generators if len(gens) == 2: x, y = gens a, b = x.order(), y.order() # Make a >= b if a < b: x, y, a, b = y, x, b, a # Using Theorem 2.1 of [3]: if {a, b} == {2}: self._is_dihedral = True return True # Using Theorem 1.1 of [3]: if (a, b) == (n, 2) and y*x*y == ~x: self._is_dihedral = True return True # Proceed with algorithm of [1] # Find elements of orders 2 and n order_2, order_n = [], [] for p in self.elements: k = p.order() if k == 2: order_2.append(p) elif k == n: order_n.append(p) if len(order_2) != n + 1 - (n % 2): self._is_dihedral = False return False if not order_n: self._is_dihedral = False return False x = order_n[0] # Want an element y of order 2 that is not a power of x # (i.e. that is not the 180-deg rotation, when n is even). y = order_2[0] if n % 2 == 0 and y == x**(n//2): y = order_2[1] self._is_dihedral = (y*x*y == ~x) return self._is_dihedral ``` ###Assistant : Return ``True`` if the group is dihedral. Examples ======== >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.named_groups import SymmetricGroup, CyclicGroup >>> G = PermutationGroup(Permutation(1, 6)(2, 5)(3, 4), Permutation(0, 1, 2, 3, 4, 5, 6)) >>> G.is_dihedral True >>> G = SymmetricGroup(3) >>> G.is_dihedral True >>> G = CyclicGroup(6) >>> G.is_dihedral False References ========== .. [1] https://math.stackexchange.com/a/827273 .. [2] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral.pdf .. [3] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral2.pdf .. [4] https://en.wikipedia.org/wiki/Dihedral_group
2,152
def extend_rearport_cable_paths(instance, created, **kwargs): if created: rearport = instance.rear_port for cablepath in CablePath.objects.filter(_nodes__contains=rearport): cablepath.retrace()
When a new FrontPort is created, add it to any CablePaths which end at its corresponding RearPort.
17
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def extend_rearport_cable_paths(instance, created, **kwargs): if created: rearport = instance.rear_port for cablepath in CablePath.objects.filter(_nodes__contains=rearport): cablepath.retrace() ``` ###Assistant : When a new FrontPort is created, add it to any CablePaths which end at its corresponding RearPort.
2,153
def _load_images_to_cache(self, image_files, frame_dims, thumbnail_size): logger.debug("Number image_files: %s, frame_dims: %s, thumbnail_size: %s", len(image_files), frame_dims, thumbnail_size) num_images = (frame_dims[0] // thumbnail_size) * (frame_dims[1] // thumbnail_size) logger.debug("num_images: %s", num_images) if num_images == 0: return False samples = [] start_idx = len(image_files) - num_images if len(image_files) > num_images else 0 show_files = sorted(image_files, key=os.path.getctime)[start_idx:] dropped_files = [] for fname in show_files: try: img = Image.open(fname) except PermissionError as err: logger.debug("Permission error opening preview file: '%s'. Original error: %s", fname, str(err)) dropped_files.append(fname) continue except Exception as err: # pylint:disable=broad-except # Swallow any issues with opening an image rather than spamming console # Can happen when trying to read partially saved images logger.debug("Error opening preview file: '%s'. Original error: %s", fname, str(err)) dropped_files.append(fname) continue width, height = img.size scaling = thumbnail_size / max(width, height) logger.debug("image width: %s, height: %s, scaling: %s", width, height, scaling) try: img = img.resize((int(width * scaling), int(height * scaling))) except OSError as err: # Image only gets loaded when we call a method, so may error on partial loads logger.debug("OS Error resizing preview image: '%s'. Original error: %s", fname, err) dropped_files.append(fname) continue if img.size[0] != img.size[1]: # Pad to square new_img = Image.new("RGB", (thumbnail_size, thumbnail_size)) new_img.paste(img, ((thumbnail_size - img.size[0])//2, (thumbnail_size - img.size[1])//2)) img = new_img draw = ImageDraw.Draw(img) draw.rectangle(((0, 0), (thumbnail_size, thumbnail_size)), outline="#E5E5E5", width=1) samples.append(np.array(img)) samples = np.array(samples) if not np.any(samples): logger.debug("No preview images collected.") return False if dropped_files: logger.debug("Removing dropped files: %s", dropped_files) show_files = [fname for fname in show_files if fname not in dropped_files] self._previewcache["filenames"] = (self._previewcache["filenames"] + show_files)[-num_images:] cache = self._previewcache["images"] if cache is None: logger.debug("Creating new cache") cache = samples[-num_images:] else: logger.debug("Appending to existing cache") cache = np.concatenate((cache, samples))[-num_images:] self._previewcache["images"] = cache logger.debug("Cache shape: %s", self._previewcache["images"].shape) return True
Load preview images to the image cache. Load new images and append to cache, filtering the cache to the number of thumbnails that will fit inside the display panel. Parameters ---------- image_files: list A list of new image files that have been modified since the last check frame_dims: tuple The (width (`int`), height (`int`)) of the display panel that will display the preview thumbnail_size: int The size of each thumbnail that should be created Returns ------- bool ``True`` if images were successfully loaded to cache otherwise ``False``
86
281
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _load_images_to_cache(self, image_files, frame_dims, thumbnail_size): logger.debug("Number image_files: %s, frame_dims: %s, thumbnail_size: %s", len(image_files), frame_dims, thumbnail_size) num_images = (frame_dims[0] // thumbnail_size) * (frame_dims[1] // thumbnail_size) logger.debug("num_images: %s", num_images) if num_images == 0: return False samples = [] start_idx = len(image_files) - num_images if len(image_files) > num_images else 0 show_files = sorted(image_files, key=os.path.getctime)[start_idx:] dropped_files = [] for fname in show_files: try: img = Image.open(fname) except PermissionError as err: logger.debug("Permission error opening preview file: '%s'. Original error: %s", fname, str(err)) dropped_files.append(fname) continue except Exception as err: # pylint:disable=broad-except # Swallow any issues with opening an image rather than spamming console # Can happen when trying to read partially saved images logger.debug("Error opening preview file: '%s'. Original error: %s", fname, str(err)) dropped_files.append(fname) continue width, height = img.size scaling = thumbnail_size / max(width, height) logger.debug("image width: %s, height: %s, scaling: %s", width, height, scaling) try: img = img.resize((int(width * scaling), int(height * scaling))) except OSError as err: # Image only gets loaded when we call a method, so may error on partial loads logger.debug("OS Error resizing preview image: '%s'. Original error: %s", fname, err) dropped_files.append(fname) continue if img.size[0] != img.size[1]: # Pad to square new_img = Image.new("RGB", (thumbnail_size, thumbnail_size)) new_img.paste(img, ((thumbnail_size - img.size[0])//2, (thumbnail_size - img.size[1])//2)) img = new_img draw = ImageDraw.Draw(img) draw.rectangle(((0, 0), (thumbnail_size, thumbnail_size)), outline="#E5E5E5", width=1) samples.append(np.array(img)) samples = np.array(samples) if not np.any(samples): logger.debug("No preview images collected.") return False if dropped_files: logger.debug("Removing dropped files: %s", dropped_files) show_files = [fname for fname in show_files if fname not in dropped_files] self._previewcache["filenames"] = (self._previewcache["filenames"] + show_files)[-num_images:] cache = self._previewcache["images"] if cache is None: logger.debug("Creating new cache") cache = samples[-num_images:] else: logger.debug("Appending to existing cache") cache = np.concatenate((cache, samples))[-num_images:] self._previewcache["images"] = cache logger.debug("Cache shape: %s", self._previewcache["images"].shape) return True ``` ###Assistant : Load preview images to the image cache. Load new images and append to cache, filtering the cache to the number of thumbnails that will fit inside the display panel. Parameters ---------- image_files: list A list of new image files that have been modified since the last check frame_dims: tuple The (width (`int`), height (`int`)) of the display panel that will display the preview thumbnail_size: int The size of each thumbnail that should be created Returns ------- bool ``True`` if images were successfully loaded to cache otherwise ``False``
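The pad-to-square step in the record (resize so the longest side fits, then paste centered onto a square canvas) is a reusable Pillow idiom. A minimal sketch of just that step:

```Python
from PIL import Image

def pad_to_square(img: Image.Image, size: int) -> Image.Image:
    """Scale the longest side to `size`, then center on a black square canvas."""
    width, height = img.size
    scaling = size / max(width, height)
    img = img.resize((int(width * scaling), int(height * scaling)))
    canvas = Image.new("RGB", (size, size))  # black background, as in the record
    canvas.paste(img, ((size - img.size[0]) // 2, (size - img.size[1]) // 2))
    return canvas

thumb = pad_to_square(Image.new("RGB", (300, 120), "white"), 96)
print(thumb.size)  # (96, 96)
```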
2,154
def target_profile(self) -> t.Optional[PosixProfile]: return t.cast(PosixProfile, self.profiles[0]) if self.profiles else None
The POSIX target profile, if it uses a different Python interpreter than the controller, otherwise None.
16
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def target_profile(self) -> t.Optional[PosixProfile]: return t.cast(PosixProfile, self.profiles[0]) if self.profiles else None ``` ###Assistant : The POSIX target profile, if it uses a different Python interpreter than the controller, otherwise None.
2,155
def preprocess_transactions(self): p_bar = tqdm(range(14), desc="Preprocessing transactions") try: # 0. If optional fields not in the transactions add missing optional_fields = [ "Sector", "Industry", "Country", "Region", "Fees", "Premium", "ISIN", ] if not set(optional_fields).issubset(set(self.__transactions.columns)): for field in optional_fields: if field not in self.__transactions.columns: self.__transactions[field] = np.nan p_bar.n += 1 p_bar.refresh() # 1. Convert Date to datetime self.__transactions["Date"] = pd.to_datetime(self.__transactions["Date"]) p_bar.n += 1 p_bar.refresh() # 2. Sort transactions by date self.__transactions = self.__transactions.sort_values(by="Date") p_bar.n += 1 p_bar.refresh() # 3. Capitalize Ticker and Type [of instrument...] self.__transactions["Ticker"] = self.__transactions["Ticker"].map( lambda x: x.upper() ) self.__transactions["Type"] = self.__transactions["Type"].map( lambda x: x.upper() ) p_bar.n += 1 p_bar.refresh() # 4. Translate side: ["deposit", "buy"] -> 1 and ["withdrawal", "sell"] -> -1 self.__transactions["Signal"] = self.__transactions["Side"].map( lambda x: 1 if x.lower() in ["deposit", "buy"] else (-1 if x.lower() in ["withdrawal", "sell"] else 0) ) p_bar.n += 1 p_bar.refresh() # 5. Convert quantity to signed integer self.__transactions["Quantity"] = ( abs(self.__transactions["Quantity"]) * self.__transactions["Signal"] ) p_bar.n += 1 p_bar.refresh() # 6. Determining the investment/divestment value self.__transactions["Investment"] = ( self.__transactions["Quantity"] * self.__transactions["Price"] + self.__transactions["Fees"] ) p_bar.n += 1 p_bar.refresh() # 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD) crypto_trades = self.__transactions[self.__transactions.Type == "CRYPTO"] self.__transactions.loc[ (self.__transactions.Type == "CRYPTO"), "Ticker" ] = [ f"{crypto}-{currency}" for crypto, currency in zip( crypto_trades.Ticker, crypto_trades.Currency ) ] p_bar.n += 1 p_bar.refresh() # 8. Reformat STOCK/ETF tickers to yfinance format if ISIN provided. # If isin not valid ticker is empty self.__transactions["yf_Ticker"] = self.__transactions["ISIN"].apply( lambda x: yf.utils.get_ticker_by_isin(x) if not pd.isna(x) else np.nan ) empty_tickers = list( self.__transactions[ (self.__transactions["yf_Ticker"] == "") | (self.__transactions["yf_Ticker"].isna()) ]["Ticker"].unique() ) # If ticker from isin is empty it is not valid in yfinance, so check if user provided ticker is supported removed_tickers = [] for item in empty_tickers: with contextlib.redirect_stdout(None): # Suppress yfinance failed download message if occurs valid_ticker = not ( yf.download( item, start=datetime.datetime.now() + datetime.timedelta(days=-5), progress=False, ).empty ) if valid_ticker: # Invalid ISIN but valid ticker self.__transactions.loc[ self.__transactions["Ticker"] == item, "yf_Ticker" ] = np.nan else: self.__transactions.loc[ self.__transactions["Ticker"] == item, "yf_Ticker" ] = "" removed_tickers.append(item) # Merge reformated tickers into Ticker self.__transactions["Ticker"] = self.__transactions["yf_Ticker"].fillna( self.__transactions["Ticker"] ) p_bar.n += 1 p_bar.refresh() # 9. Remove unsupported ISINs that came out empty self.__transactions.drop( self.__transactions[self.__transactions["Ticker"] == ""].index, inplace=True, ) p_bar.n += 1 p_bar.refresh() # 10. Create tickers dictionary with structure {'Type': [Ticker]} for ticker_type in set(self.__transactions["Type"]): self.tickers[ticker_type] = list( set( self.__transactions[ self.__transactions["Type"].isin([ticker_type]) ]["Ticker"] ) ) p_bar.n += 1 p_bar.refresh() # 11. Create list with tickers except cash self.tickers_list = list(set(self.__transactions["Ticker"])) p_bar.n += 1 p_bar.refresh() # 12. Save transactions inception date self.inception_date = self.__transactions["Date"][0] p_bar.n += 1 p_bar.refresh() # 13. Populate fields Sector, Industry and Country if ( self.__transactions.loc[ self.__transactions["Type"] == "STOCK", optional_fields, ] .isnull() .values.any() ): # If any fields is empty for stocks (overwrites any info there) self.load_company_data() p_bar.n += 1 p_bar.refresh() # Warn user of removed ISINs if removed_tickers: console.print( f"\n\n[red]The following tickers are not supported and were removed: {removed_tickers}." f"\nManually edit the 'Ticker' field with the proper Yahoo Finance suffix or provide a valid ISIN." f"\nSuffix info on 'Yahoo Finance market coverage':" " https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html" f"\nE.g. IWDA -> IWDA.AS[/red]" ) except Exception: console.print("\nCould not preprocess transactions.")
Method to preprocess, format and compute auxiliary fields. Preprocessing steps: 0. If optional fields not in the transactions add missing 1. Convert Date to datetime 2. Sort transactions by date 3. Capitalize Ticker and Type [of instrument...] 4. Translate side: ["deposit", "buy"] -> 1 and ["withdrawal", "sell"] -> -1 5. Convert quantity to signed integer 6. Determining the investment/divestment value 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD) 8. Reformat STOCK/ETF tickers to yfinance format if ISIN provided 9. Remove unsupported ISINs that came out empty 10. Create tickers dictionary with structure {'Type': [Ticker]} 11. Create list with tickers except cash 12. Save transactions inception date 13. Populate fields Sector, Industry and Country
116
512
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def preprocess_transactions(self): p_bar = tqdm(range(14), desc="Preprocessing transactions") try: # 0. If optional fields not in the transactions add missing optional_fields = [ "Sector", "Industry", "Country", "Region", "Fees", "Premium", "ISIN", ] if not set(optional_fields).issubset(set(self.__transactions.columns)): for field in optional_fields: if field not in self.__transactions.columns: self.__transactions[field] = np.nan p_bar.n += 1 p_bar.refresh() # 1. Convert Date to datetime self.__transactions["Date"] = pd.to_datetime(self.__transactions["Date"]) p_bar.n += 1 p_bar.refresh() # 2. Sort transactions by date self.__transactions = self.__transactions.sort_values(by="Date") p_bar.n += 1 p_bar.refresh() # 3. Capitalize Ticker and Type [of instrument...] self.__transactions["Ticker"] = self.__transactions["Ticker"].map( lambda x: x.upper() ) self.__transactions["Type"] = self.__transactions["Type"].map( lambda x: x.upper() ) p_bar.n += 1 p_bar.refresh() # 4. Translate side: ["deposit", "buy"] -> 1 and ["withdrawal", "sell"] -> -1 self.__transactions["Signal"] = self.__transactions["Side"].map( lambda x: 1 if x.lower() in ["deposit", "buy"] else (-1 if x.lower() in ["withdrawal", "sell"] else 0) ) p_bar.n += 1 p_bar.refresh() # 5. Convert quantity to signed integer self.__transactions["Quantity"] = ( abs(self.__transactions["Quantity"]) * self.__transactions["Signal"] ) p_bar.n += 1 p_bar.refresh() # 6. Determining the investment/divestment value self.__transactions["Investment"] = ( self.__transactions["Quantity"] * self.__transactions["Price"] + self.__transactions["Fees"] ) p_bar.n += 1 p_bar.refresh() # 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD) crypto_trades = self.__transactions[self.__transactions.Type == "CRYPTO"] self.__transactions.loc[ (self.__transactions.Type == "CRYPTO"), "Ticker" ] = [ f"{crypto}-{currency}" for crypto, currency in zip( crypto_trades.Ticker, crypto_trades.Currency ) ] p_bar.n += 1 p_bar.refresh() # 8. Reformat STOCK/ETF tickers to yfinance format if ISIN provided. # If isin not valid ticker is empty self.__transactions["yf_Ticker"] = self.__transactions["ISIN"].apply( lambda x: yf.utils.get_ticker_by_isin(x) if not pd.isna(x) else np.nan ) empty_tickers = list( self.__transactions[ (self.__transactions["yf_Ticker"] == "") | (self.__transactions["yf_Ticker"].isna()) ]["Ticker"].unique() ) # If ticker from isin is empty it is not valid in yfinance, so check if user provided ticker is supported removed_tickers = [] for item in empty_tickers: with contextlib.redirect_stdout(None): # Suppress yfinance failed download message if occurs valid_ticker = not ( yf.download( item, start=datetime.datetime.now() + datetime.timedelta(days=-5), progress=False, ).empty ) if valid_ticker: # Invalid ISIN but valid ticker self.__transactions.loc[ self.__transactions["Ticker"] == item, "yf_Ticker" ] = np.nan else: self.__transactions.loc[ self.__transactions["Ticker"] == item, "yf_Ticker" ] = "" removed_tickers.append(item) # Merge reformatted tickers into Ticker self.__transactions["Ticker"] = self.__transactions["yf_Ticker"].fillna( self.__transactions["Ticker"] ) p_bar.n += 1 p_bar.refresh() # 9. Remove unsupported ISINs that came out empty self.__transactions.drop( self.__transactions[self.__transactions["Ticker"] == ""].index, inplace=True, ) p_bar.n += 1 p_bar.refresh() # 10. Create tickers dictionary with structure {'Type': [Ticker]} for ticker_type in set(self.__transactions["Type"]): self.tickers[ticker_type] = list( set( self.__transactions[ self.__transactions["Type"].isin([ticker_type]) ]["Ticker"] ) ) p_bar.n += 1 p_bar.refresh() # 11. Create list with tickers except cash self.tickers_list = list(set(self.__transactions["Ticker"])) p_bar.n += 1 p_bar.refresh() # 12. Save transactions inception date self.inception_date = self.__transactions["Date"][0] p_bar.n += 1 p_bar.refresh() # 13. Populate fields Sector, Industry and Country if ( self.__transactions.loc[ self.__transactions["Type"] == "STOCK", optional_fields, ] .isnull() .values.any() ): # If any field is empty for stocks (overwrites any info there) self.load_company_data() p_bar.n += 1 p_bar.refresh() # Warn user of removed ISINs if removed_tickers: console.print( f"\n\n[red]The following tickers are not supported and were removed: {removed_tickers}." f"\nManually edit the 'Ticker' field with the proper Yahoo Finance suffix or provide a valid ISIN." f"\nSuffix info on 'Yahoo Finance market coverage':" " https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html" f"\nE.g. IWDA -> IWDA.AS[/red]" ) except Exception: console.print("\nCould not preprocess transactions.") ``` ###Assistant : Method to preprocess, format and compute auxiliary fields. Preprocessing steps: 0. If optional fields not in the transactions add missing 1. Convert Date to datetime 2. Sort transactions by date 3. Capitalize Ticker and Type [of instrument...] 4. Translate side: ["deposit", "buy"] -> 1 and ["withdrawal", "sell"] -> -1 5. Convert quantity to signed integer 6. Determining the investment/divestment value 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD) 8. Reformat STOCK/ETF tickers to yfinance format if ISIN provided 9. Remove unsupported ISINs that came out empty 10. Create tickers dictionary with structure {'Type': [Ticker]} 11. Create list with tickers except cash 12. Save transactions inception date 13. Populate fields Sector, Industry and Country
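A minimal, runnable sketch of steps 4-6 above (side translation, signed quantity, investment value) on a toy transactions DataFrame; the column names mirror the method, while the sample rows are invented for illustration.

```Python
import pandas as pd

df = pd.DataFrame({
    "Side": ["Buy", "Sell", "Deposit"],
    "Quantity": [10, 4, 100],
    "Price": [5.0, 6.0, 1.0],
    "Fees": [1.0, 1.0, 0.0],
})
# 4. Translate side: buy/deposit -> 1, sell/withdrawal -> -1
df["Signal"] = df["Side"].map(
    lambda x: 1 if x.lower() in ["deposit", "buy"]
    else (-1 if x.lower() in ["withdrawal", "sell"] else 0)
)
# 5. Convert quantity to a signed value
df["Quantity"] = abs(df["Quantity"]) * df["Signal"]
# 6. Investment/divestment value per transaction
df["Investment"] = df["Quantity"] * df["Price"] + df["Fees"]
print(df[["Side", "Quantity", "Investment"]])
```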
2,156
def _var_key(var): # pylint: disable=protected-access # Get the distributed variable if it exists. if hasattr(var, "_distributed_container"): var = var._distributed_container() if getattr(var, "_in_graph_mode", False): return var._shared_name return var._unique_id
Key for representing a primary variable, for looking up slots. In graph mode the name is derived from the var shared name. In eager mode the name is derived from the var unique id. If distribution strategy exists, get the primary variable first. Args: var: the variable. Returns: the unique name of the variable.
54
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _var_key(var): # pylint: disable=protected-access # Get the distributed variable if it exists. if hasattr(var, "_distributed_container"): var = var._distributed_container() if getattr(var, "_in_graph_mode", False): return var._shared_name return var._unique_id ``` ###Assistant : Key for representing a primary variable, for looking up slots. In graph mode the name is derived from the var shared name. In eager mode the name is derived from the var unique id. If distribution strategy exists, get the primary variable first. Args: var: the variable. Returns: the unique name of the variable.
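A toy illustration of the two branches of _var_key; GraphVar and EagerVar are made-up stand-ins carrying the same private attributes the method inspects, not real TensorFlow variables.

```Python
class GraphVar:
    _in_graph_mode = True
    _shared_name = "dense/kernel"
    _unique_id = "dense/kernel:0"

class EagerVar:
    _in_graph_mode = False
    _shared_name = "dense/kernel"
    _unique_id = "dense/kernel_1"

def var_key(var):
    # Same branch logic as _var_key, minus the distributed-container step
    if getattr(var, "_in_graph_mode", False):
        return var._shared_name
    return var._unique_id

print(var_key(GraphVar()))  # 'dense/kernel'   (shared name in graph mode)
print(var_key(EagerVar()))  # 'dense/kernel_1' (unique id in eager mode)
```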
2,157
def test_stacking_classifier_base_regressor(): X_train, X_test, y_train, y_test = train_test_split( scale(X_iris), y_iris, stratify=y_iris, random_state=42 ) clf = StackingClassifier(estimators=[("ridge", Ridge())]) clf.fit(X_train, y_train) clf.predict(X_test) clf.predict_proba(X_test) assert clf.score(X_test, y_test) > 0.8
Check that a regressor can be used as the first layer in `StackingClassifier`.
13
26
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_stacking_classifier_base_regressor(): X_train, X_test, y_train, y_test = train_test_split( scale(X_iris), y_iris, stratify=y_iris, random_state=42 ) clf = StackingClassifier(estimators=[("ridge", Ridge())]) clf.fit(X_train, y_train) clf.predict(X_test) clf.predict_proba(X_test) assert clf.score(X_test, y_test) > 0.8 ``` ###Assistant : Check that a regressor can be used as the first layer in `StackingClassifier`.
2,158
def _get_style_dict(self, gc, rgbFace): attrib = {} forced_alpha = gc.get_forced_alpha() if gc.get_hatch() is not None: attrib['fill'] = "url(#%s)" % self._get_hatch(gc, rgbFace) if (rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha): attrib['fill-opacity'] = _short_float_fmt(rgbFace[3]) else: if rgbFace is None: attrib['fill'] = 'none' else: if tuple(rgbFace[:3]) != (0, 0, 0): attrib['fill'] = rgb2hex(rgbFace) if (len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha): attrib['fill-opacity'] = _short_float_fmt(rgbFace[3]) if forced_alpha and gc.get_alpha() != 1.0: attrib['opacity'] = _short_float_fmt(gc.get_alpha()) offset, seq = gc.get_dashes() if seq is not None: attrib['stroke-dasharray'] = ','.join( _short_float_fmt(val) for val in seq) attrib['stroke-dashoffset'] = _short_float_fmt(float(offset)) linewidth = gc.get_linewidth() if linewidth: rgb = gc.get_rgb() attrib['stroke'] = rgb2hex(rgb) if not forced_alpha and rgb[3] != 1.0: attrib['stroke-opacity'] = _short_float_fmt(rgb[3]) if linewidth != 1.0: attrib['stroke-width'] = _short_float_fmt(linewidth) if gc.get_joinstyle() != 'round': attrib['stroke-linejoin'] = gc.get_joinstyle() if gc.get_capstyle() != 'butt': attrib['stroke-linecap'] = _capstyle_d[gc.get_capstyle()] return attrib
Generate a style string from the GraphicsContext and rgbFace.
9
145
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_style_dict(self, gc, rgbFace): attrib = {} forced_alpha = gc.get_forced_alpha() if gc.get_hatch() is not None: attrib['fill'] = "url(#%s)" % self._get_hatch(gc, rgbFace) if (rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha): attrib['fill-opacity'] = _short_float_fmt(rgbFace[3]) else: if rgbFace is None: attrib['fill'] = 'none' else: if tuple(rgbFace[:3]) != (0, 0, 0): attrib['fill'] = rgb2hex(rgbFace) if (len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha): attrib['fill-opacity'] = _short_float_fmt(rgbFace[3]) if forced_alpha and gc.get_alpha() != 1.0: attrib['opacity'] = _short_float_fmt(gc.get_alpha()) offset, seq = gc.get_dashes() if seq is not None: attrib['stroke-dasharray'] = ','.join( _short_float_fmt(val) for val in seq) attrib['stroke-dashoffset'] = _short_float_fmt(float(offset)) linewidth = gc.get_linewidth() if linewidth: rgb = gc.get_rgb() attrib['stroke'] = rgb2hex(rgb) if not forced_alpha and rgb[3] != 1.0: attrib['stroke-opacity'] = _short_float_fmt(rgb[3]) if linewidth != 1.0: attrib['stroke-width'] = _short_float_fmt(linewidth) if gc.get_joinstyle() != 'round': attrib['stroke-linejoin'] = gc.get_joinstyle() if gc.get_capstyle() != 'butt': attrib['stroke-linecap'] = _capstyle_d[gc.get_capstyle()] return attrib ``` ###Assistant : Generate a style string from the GraphicsContext and rgbFace.
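A reduced sketch of just the fill-attribute branch above, using matplotlib's public rgb2hex helper so it runs without a GraphicsContext; the hatch and stroke handling are omitted.

```Python
from matplotlib.colors import rgb2hex

def fill_style(rgba_face, forced_alpha=False):
    attrib = {}
    if rgba_face is None:
        attrib["fill"] = "none"
    else:
        # non-black colors get an explicit hex fill, as in the method above
        if tuple(rgba_face[:3]) != (0, 0, 0):
            attrib["fill"] = rgb2hex(rgba_face)
        if len(rgba_face) == 4 and rgba_face[3] != 1.0 and not forced_alpha:
            attrib["fill-opacity"] = str(rgba_face[3])
    return attrib

print(fill_style((1.0, 0.0, 0.0, 0.5)))  # {'fill': '#ff0000', 'fill-opacity': '0.5'}
print(fill_style(None))                  # {'fill': 'none'}
```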
2,159
def test_union(self, mock_handler): self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # --- use predictor --- predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical, 'c': dtype.datetime }, 'predicted_value': 'ccc' } self.set_predictor(predictor) sql = # union all ret = self.command_executor.execute_command( parse_sql(sql.format(union='ALL'), dialect='mindsdb')) assert ret.error_code is None ret_df = self.ret_to_df(ret) assert list(ret_df.columns) == ['a1', 'target'] assert ret_df.shape[0] == 3 + 2 # union ret = self.command_executor.execute_command( parse_sql(sql.format(union=''), dialect='mindsdb')) assert ret.error_code is None ret_df = self.ret_to_df(ret) assert list(ret_df.columns) == ['a1', 'target'] assert ret_df.shape[0] == 3
SELECT a as a1, b as target FROM pg.tasks UNION {union} SELECT model.a as a2, model.p as target2 FROM pg.tasks as t JOIN mindsdb.task_model as model WHERE t.a=1
28
85
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_union(self, mock_handler): self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # --- use predictor --- predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical, 'c': dtype.datetime }, 'predicted_value': 'ccc' } self.set_predictor(predictor) sql = # union all ret = self.command_executor.execute_command( parse_sql(sql.format(union='ALL'), dialect='mindsdb')) assert ret.error_code is None ret_df = self.ret_to_df(ret) assert list(ret_df.columns) == ['a1', 'target'] assert ret_df.shape[0] == 3 + 2 # union ret = self.command_executor.execute_command( parse_sql(sql.format(union=''), dialect='mindsdb')) assert ret.error_code is None ret_df = self.ret_to_df(ret) assert list(ret_df.columns) == ['a1', 'target'] assert ret_df.shape[0] == 3 ``` ###Assistant : SELECT a as a1, b as target FROM pg.tasks UNION {union} SELECT model.a as a2, model.p as target2 FROM pg.tasks as t JOIN mindsdb.task_model as model WHERE t.a=1
2,160
def test_run_cleanup_tables(self, clean_table_mock, table_names): base_kwargs = dict( clean_before_timestamp=None, dry_run=None, verbose=None, ) run_cleanup(**base_kwargs, table_names=table_names) assert clean_table_mock.call_count == len(table_names) if table_names else len(config_dict)
``_cleanup_table`` should be called for each table in subset if one is provided else should be called for all tables.
20
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_run_cleanup_tables(self, clean_table_mock, table_names): base_kwargs = dict( clean_before_timestamp=None, dry_run=None, verbose=None, ) run_cleanup(**base_kwargs, table_names=table_names) assert clean_table_mock.call_count == len(table_names) if table_names else len(config_dict) ``` ###Assistant : ``_cleanup_table`` should be called for each table in subset if one is provided else should be called for all tables.
2,161
def test_set_serialize_call_old_signature(self, get_import, session): serialize_watcher = MagicMock()
When XCom.serialize_value takes only param ``value``, other kwargs should be ignored.
11
7
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_set_serialize_call_old_signature(self, get_import, session): serialize_watcher = MagicMock() ``` ###Assistant : When XCom.serialize_value takes only param ``value``, other kwargs should be ignored.
2,162
def gmean(a, axis=0, dtype=None, weights=None): if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it log_a = np.log(np.array(a, dtype=dtype)) elif dtype: # Must change the default dtype allowing array type if isinstance(a, np.ma.MaskedArray): log_a = np.log(np.ma.asarray(a, dtype=dtype)) else: log_a = np.log(np.asarray(a, dtype=dtype)) else: log_a = np.log(a) if weights is not None: weights = np.asanyarray(weights, dtype=dtype) return np.exp(np.average(log_a, axis=axis, weights=weights))
Compute the geometric mean along the specified axis. Return the geometric average of the array elements. That is: n-th root of (x1 * x2 * ... * xn) Parameters ---------- a : array_like Input array or object that can be converted to an array. axis : int or None, optional Axis along which the geometric mean is computed. Default is 0. If None, compute over the whole array `a`. dtype : dtype, optional Type of the returned array and of the accumulator in which the elements are summed. If dtype is not specified, it defaults to the dtype of a, unless a has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used. weights : array_like, optional The `weights` array must be broadcastable to the same shape as `a`. Default is None, which gives each value a weight of 1.0. Returns ------- gmean : ndarray See `dtype` parameter above. See Also -------- numpy.mean : Arithmetic average numpy.average : Weighted average hmean : Harmonic mean Notes ----- The geometric average is computed over a single dimension of the input array, axis=0 by default, or all values in the array if axis=None. float64 intermediate and return values are used for integer inputs. Beginning in SciPy 1.9, ``np.matrix`` inputs are converted to ``np.ndarray``s before the calculation is performed. In this case, the output will be a scalar or ``np.ndarray`` of appropriate shape rather than a 2D ``np.matrix``. Similarly, while masked elements of masked arrays are still ignored, the output will be a scalar or ``np.ndarray`` rather than a masked array with ``mask=False``. References ---------- .. [1] "Weighted Geometric Mean", *Wikipedia*, https://en.wikipedia.org/wiki/Weighted_geometric_mean. Examples -------- >>> from scipy.stats import gmean >>> gmean([1, 4]) 2.0 >>> gmean([1, 2, 3, 4, 5, 6, 7]) 3.3800151591412964
301
63
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def gmean(a, axis=0, dtype=None, weights=None): if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it log_a = np.log(np.array(a, dtype=dtype)) elif dtype: # Must change the default dtype allowing array type if isinstance(a, np.ma.MaskedArray): log_a = np.log(np.ma.asarray(a, dtype=dtype)) else: log_a = np.log(np.asarray(a, dtype=dtype)) else: log_a = np.log(a) if weights is not None: weights = np.asanyarray(weights, dtype=dtype) return np.exp(np.average(log_a, axis=axis, weights=weights)) ``` ###Assistant : Compute the geometric mean along the specified axis. Return the geometric average of the array elements. That is: n-th root of (x1 * x2 * ... * xn) Parameters ---------- a : array_like Input array or object that can be converted to an array. axis : int or None, optional Axis along which the geometric mean is computed. Default is 0. If None, compute over the whole array `a`. dtype : dtype, optional Type of the returned array and of the accumulator in which the elements are summed. If dtype is not specified, it defaults to the dtype of a, unless a has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used. weights : array_like, optional The `weights` array must be broadcastable to the same shape as `a`. Default is None, which gives each value a weight of 1.0. Returns ------- gmean : ndarray See `dtype` parameter above. See Also -------- numpy.mean : Arithmetic average numpy.average : Weighted average hmean : Harmonic mean Notes ----- The geometric average is computed over a single dimension of the input array, axis=0 by default, or all values in the array if axis=None. float64 intermediate and return values are used for integer inputs. Beginning in SciPy 1.9, ``np.matrix`` inputs are converted to ``np.ndarray``s before the calculation is performed. In this case, the output will be a scalar or ``np.ndarray`` of appropriate shape rather than a 2D ``np.matrix``. Similarly, while masked elements of masked arrays are still ignored, the output will be a scalar or ``np.ndarray`` rather than a masked array with ``mask=False``. References ---------- .. [1] "Weighted Geometric Mean", *Wikipedia*, https://en.wikipedia.org/wiki/Weighted_geometric_mean. Examples -------- >>> from scipy.stats import gmean >>> gmean([1, 4]) 2.0 >>> gmean([1, 2, 3, 4, 5, 6, 7]) 3.3800151591412964
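A weighted usage example for the `weights` argument shown in the signature above (the docstring only demonstrates the unweighted case); this assumes a SciPy version recent enough to accept `weights`.

```Python
import numpy as np
from scipy.stats import gmean

a = np.array([2.0, 8.0])
w = np.array([1.0, 3.0])
# exp((1*ln 2 + 3*ln 8) / 4) = (2 * 8**3) ** (1/4) = 1024 ** 0.25
print(gmean(a, weights=w))     # ~5.6569
print((2.0 * 8.0**3) ** 0.25)  # same value, computed directly
```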
2,163
def handle_m2m_field(self, obj, field): raise NotImplementedError( "subclasses of Serializer must provide a handle_m2m_field() method" )
Called to handle a ManyToManyField.
5
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def handle_m2m_field(self, obj, field): raise NotImplementedError( "subclasses of Serializer must provide a handle_m2m_field() method" ) ``` ###Assistant : Called to handle a ManyToManyField.
2,164
def check_connection(self) -> StatusResponse: response = StatusResponse(False) need_to_close = self.is_connected is False try: connection = self.connect() with connection.cursor() as cur: cur.execute('SELECT * FROM SYS.M_DATABASE') response.success = True except dbapi.Error as e: log.error(f'Error connecting to SAP HANA {self.address}, {e}!') response.error_message = e if response.success is True and need_to_close: self.disconnect() if response.success is False and self.is_connected is True: self.is_connected = False return response
Check the connection of the SAP HANA database :return: success status and error message if error occurs
17
61
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_connection(self) -> StatusResponse: response = StatusResponse(False) need_to_close = self.is_connected is False try: connection = self.connect() with connection.cursor() as cur: cur.execute('SELECT * FROM SYS.M_DATABASE') response.success = True except dbapi.Error as e: log.error(f'Error connecting to SAP HANA {self.address}, {e}!') response.error_message = e if response.success is True and need_to_close: self.disconnect() if response.success is False and self.is_connected is True: self.is_connected = False return response ``` ###Assistant : Check the connection of the SAP HANA database :return: success status and error message if error occurs
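The connect / probe-query / status-object pattern above, sketched against stdlib sqlite3 so it runs anywhere; this StatusResponse dataclass is a stand-in for the mindsdb class, not its real definition.

```Python
import sqlite3
from dataclasses import dataclass

@dataclass
class StatusResponse:
    success: bool = False
    error_message: str = ""

def check_connection(path=":memory:") -> StatusResponse:
    response = StatusResponse(False)
    try:
        con = sqlite3.connect(path)
        con.execute("SELECT 1")  # cheap probe query, like SYS.M_DATABASE above
        con.close()
        response.success = True
    except sqlite3.Error as e:
        response.error_message = str(e)
    return response

print(check_connection())  # StatusResponse(success=True, error_message='')
```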
2,165
def extract(self, rowsList, colsList): r if not is_sequence(rowsList) or not is_sequence(colsList): raise TypeError("rowsList and colsList must be iterable") # ensure rowsList and colsList are lists of integers if rowsList and all(isinstance(i, bool) for i in rowsList): rowsList = [index for index, item in enumerate(rowsList) if item] if colsList and all(isinstance(i, bool) for i in colsList): colsList = [index for index, item in enumerate(colsList) if item] # ensure everything is in range rowsList = [a2idx(k, self.rows) for k in rowsList] colsList = [a2idx(k, self.cols) for k in colsList] return self._eval_extract(rowsList, colsList)
Return a submatrix by specifying a list of rows and columns. Negative indices can be given. All indices must be in the range $-n \le i < n$ where $n$ is the number of rows or columns. Examples ======== >>> from sympy import Matrix >>> m = Matrix(4, 3, range(12)) >>> m Matrix([ [0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]]) >>> m.extract([0, 1, 3], [0, 1]) Matrix([ [0, 1], [3, 4], [9, 10]]) Rows or columns can be repeated: >>> m.extract([0, 0, 1], [-1]) Matrix([ [2], [2], [5]]) Every other row can be taken by using range to provide the indices: >>> m.extract(range(0, m.rows, 2), [-1]) Matrix([ [2], [8]]) RowsList or colsList can also be a list of booleans, in which case the rows or columns corresponding to the True values will be selected: >>> m.extract([0, 1, 2, 3], [True, False, True]) Matrix([ [0, 2], [3, 5], [6, 8], [9, 11]])
156
90
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def extract(self, rowsList, colsList): r if not is_sequence(rowsList) or not is_sequence(colsList): raise TypeError("rowsList and colsList must be iterable") # ensure rowsList and colsList are lists of integers if rowsList and all(isinstance(i, bool) for i in rowsList): rowsList = [index for index, item in enumerate(rowsList) if item] if colsList and all(isinstance(i, bool) for i in colsList): colsList = [index for index, item in enumerate(colsList) if item] # ensure everything is in range rowsList = [a2idx(k, self.rows) for k in rowsList] colsList = [a2idx(k, self.cols) for k in colsList] return self._eval_extract(rowsList, colsList) ``` ###Assistant : Return a submatrix by specifying a list of rows and columns. Negative indices can be given. All indices must be in the range $-n \le i < n$ where $n$ is the number of rows or columns. Examples ======== >>> from sympy import Matrix >>> m = Matrix(4, 3, range(12)) >>> m Matrix([ [0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]]) >>> m.extract([0, 1, 3], [0, 1]) Matrix([ [0, 1], [3, 4], [9, 10]]) Rows or columns can be repeated: >>> m.extract([0, 0, 1], [-1]) Matrix([ [2], [2], [5]]) Every other row can be taken by using range to provide the indices: >>> m.extract(range(0, m.rows, 2), [-1]) Matrix([ [2], [8]]) RowsList or colsList can also be a list of booleans, in which case the rows or columns corresponding to the True values will be selected: >>> m.extract([0, 1, 2, 3], [True, False, True]) Matrix([ [0, 2], [3, 5], [6, 8], [9, 11]])
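One more usage pattern beyond the docstring examples: building the row list from a condition, which composes naturally with extract().

```Python
from sympy import Matrix

m = Matrix(4, 3, range(12))
# keep only the rows whose first entry exceeds 3
rows = [i for i in range(m.rows) if m[i, 0] > 3]
print(m.extract(rows, range(m.cols)))
# Matrix([[6, 7, 8], [9, 10, 11]])
```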
2,166
def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None, manual=None, **kwargs): doit_flags = { 'deep': False, 'meijerg': meijerg, 'conds': conds, 'risch': risch, 'heurisch': heurisch, 'manual': manual } integral = Integral(*args, **kwargs) if isinstance(integral, Integral): return integral.doit(**doit_flags) else: new_args = [a.doit(**doit_flags) if isinstance(a, Integral) else a for a in integral.args] return integral.func(*new_args)
integrate(f, var, ...) Explanation =========== Compute definite or indefinite integral of one or more variables using Risch-Norman algorithm and table lookup. This procedure is able to handle elementary algebraic and transcendental functions and also a huge class of special functions, including Airy, Bessel, Whittaker and Lambert. var can be: - a symbol -- indefinite integration - a tuple (symbol, a) -- indefinite integration with result given with ``a`` replacing ``symbol`` - a tuple (symbol, a, b) -- definite integration Several variables can be specified, in which case the result is multiple integration. (If var is omitted and the integrand is univariate, the indefinite integral in that variable will be performed.) Indefinite integrals are returned without terms that are independent of the integration variables. (see examples) Definite improper integrals often entail delicate convergence conditions. Pass conds='piecewise', 'separate' or 'none' to have these returned, respectively, as a Piecewise function, as a separate result (i.e. result will be a tuple), or not at all (default is 'piecewise'). **Strategy** SymPy uses various approaches to definite integration. One method is to find an antiderivative for the integrand, and then use the fundamental theorem of calculus. Various functions are implemented to integrate polynomial, rational and trigonometric functions, and integrands containing DiracDelta terms. SymPy also implements the part of the Risch algorithm, which is a decision procedure for integrating elementary functions, i.e., the algorithm can either find an elementary antiderivative, or prove that one does not exist. There is also a (very successful, albeit somewhat slow) general implementation of the heuristic Risch algorithm. This algorithm will eventually be phased out as more of the full Risch algorithm is implemented. See the docstring of Integral._eval_integral() for more details on computing the antiderivative using algebraic methods. The option risch=True can be used to use only the (full) Risch algorithm. This is useful if you want to know if an elementary function has an elementary antiderivative. If the indefinite Integral returned by this function is an instance of NonElementaryIntegral, that means that the Risch algorithm has proven that integral to be non-elementary. Note that by default, additional methods (such as the Meijer G method outlined below) are tried on these integrals, as they may be expressible in terms of special functions, so if you only care about elementary answers, use risch=True. Also note that an unevaluated Integral returned by this function is not necessarily a NonElementaryIntegral, even with risch=True, as it may just be an indication that the particular part of the Risch algorithm needed to integrate that function is not yet implemented. Another family of strategies comes from re-writing the integrand in terms of so-called Meijer G-functions. Indefinite integrals of a single G-function can always be computed, and the definite integral of a product of two G-functions can be computed from zero to infinity. Various strategies are implemented to rewrite integrands as G-functions, and use this information to compute integrals (see the ``meijerint`` module). The option manual=True can be used to use only an algorithm that tries to mimic integration by hand. This algorithm does not handle as many integrands as the other algorithms implemented but may return results in a more familiar form. The ``manualintegrate`` module has functions that return the steps used (see the module docstring for more information). In general, the algebraic methods work best for computing antiderivatives of (possibly complicated) combinations of elementary functions. The G-function methods work best for computing definite integrals from zero to infinity of moderately complicated combinations of special functions, or indefinite integrals of very simple combinations of special functions. The strategy employed by the integration code is as follows: - If computing a definite integral, and both limits are real, and at least one limit is +- oo, try the G-function method of definite integration first. - Try to find an antiderivative, using all available methods, ordered by performance (that is try fastest method first, slowest last; in particular polynomial integration is tried first, Meijer G-functions second to last, and heuristic Risch last). - If still not successful, try G-functions irrespective of the limits. The option meijerg=True, False, None can be used to, respectively: always use G-function methods and no others, never use G-function methods, or use all available methods (in order as described above). It defaults to None. Examples ======== >>> from sympy import integrate, log, exp, oo >>> from sympy.abc import a, x, y >>> integrate(x*y, x) x**2*y/2 >>> integrate(log(x), x) x*log(x) - x >>> integrate(log(x), (x, 1, a)) a*log(a) - a + 1 >>> integrate(x) x**2/2 Terms that are independent of x are dropped by indefinite integration: >>> from sympy import sqrt >>> integrate(sqrt(1 + x), (x, 0, x)) 2*(x + 1)**(3/2)/3 - 2/3 >>> integrate(sqrt(1 + x), x) 2*(x + 1)**(3/2)/3 >>> integrate(x*y) Traceback (most recent call last): ... ValueError: specify integration variables to integrate x*y Note that ``integrate(x)`` syntax is meant only for convenience in interactive sessions and should be avoided in library code. >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise' Piecewise((gamma(a + 1), re(a) > -1), (Integral(x**a*exp(-x), (x, 0, oo)), True)) >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none') gamma(a + 1) >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate') (gamma(a + 1), re(a) > -1) See Also ======== Integral, Integral.doit
865
48
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None, manual=None, **kwargs): doit_flags = { 'deep': False, 'meijerg': meijerg, 'conds': conds, 'risch': risch, 'heurisch': heurisch, 'manual': manual } integral = Integral(*args, **kwargs) if isinstance(integral, Integral): return integral.doit(**doit_flags) else: new_args = [a.doit(**doit_flags) if isinstance(a, Integral) else a for a in integral.args] return integral.func(*new_args) ``` ###Assistant : integrate(f, var, ...) Explanation =========== Compute definite or indefinite integral of one or more variables using Risch-Norman algorithm and table lookup. This procedure is able to handle elementary algebraic and transcendental functions and also a huge class of special functions, including Airy, Bessel, Whittaker and Lambert. var can be: - a symbol -- indefinite integration - a tuple (symbol, a) -- indefinite integration with result given with ``a`` replacing ``symbol`` - a tuple (symbol, a, b) -- definite integration Several variables can be specified, in which case the result is multiple integration. (If var is omitted and the integrand is univariate, the indefinite integral in that variable will be performed.) Indefinite integrals are returned without terms that are independent of the integration variables. (see examples) Definite improper integrals often entail delicate convergence conditions. Pass conds='piecewise', 'separate' or 'none' to have these returned, respectively, as a Piecewise function, as a separate result (i.e. result will be a tuple), or not at all (default is 'piecewise'). **Strategy** SymPy uses various approaches to definite integration. One method is to find an antiderivative for the integrand, and then use the fundamental theorem of calculus. Various functions are implemented to integrate polynomial, rational and trigonometric functions, and integrands containing DiracDelta terms. SymPy also implements the part of the Risch algorithm, which is a decision procedure for integrating elementary functions, i.e., the algorithm can either find an elementary antiderivative, or prove that one does not exist. There is also a (very successful, albeit somewhat slow) general implementation of the heuristic Risch algorithm. This algorithm will eventually be phased out as more of the full Risch algorithm is implemented. See the docstring of Integral._eval_integral() for more details on computing the antiderivative using algebraic methods. The option risch=True can be used to use only the (full) Risch algorithm. This is useful if you want to know if an elementary function has an elementary antiderivative. If the indefinite Integral returned by this function is an instance of NonElementaryIntegral, that means that the Risch algorithm has proven that integral to be non-elementary. Note that by default, additional methods (such as the Meijer G method outlined below) are tried on these integrals, as they may be expressible in terms of special functions, so if you only care about elementary answers, use risch=True. Also note that an unevaluated Integral returned by this function is not necessarily a NonElementaryIntegral, even with risch=True, as it may just be an indication that the particular part of the Risch algorithm needed to integrate that function is not yet implemented. Another family of strategies comes from re-writing the integrand in terms of so-called Meijer G-functions. Indefinite integrals of a single G-function can always be computed, and the definite integral of a product of two G-functions can be computed from zero to infinity. Various strategies are implemented to rewrite integrands as G-functions, and use this information to compute integrals (see the ``meijerint`` module). The option manual=True can be used to use only an algorithm that tries to mimic integration by hand. This algorithm does not handle as many integrands as the other algorithms implemented but may return results in a more familiar form. The ``manualintegrate`` module has functions that return the steps used (see the module docstring for more information). In general, the algebraic methods work best for computing antiderivatives of (possibly complicated) combinations of elementary functions. The G-function methods work best for computing definite integrals from zero to infinity of moderately complicated combinations of special functions, or indefinite integrals of very simple combinations of special functions. The strategy employed by the integration code is as follows: - If computing a definite integral, and both limits are real, and at least one limit is +- oo, try the G-function method of definite integration first. - Try to find an antiderivative, using all available methods, ordered by performance (that is try fastest method first, slowest last; in particular polynomial integration is tried first, Meijer G-functions second to last, and heuristic Risch last). - If still not successful, try G-functions irrespective of the limits. The option meijerg=True, False, None can be used to, respectively: always use G-function methods and no others, never use G-function methods, or use all available methods (in order as described above). It defaults to None. Examples ======== >>> from sympy import integrate, log, exp, oo >>> from sympy.abc import a, x, y >>> integrate(x*y, x) x**2*y/2 >>> integrate(log(x), x) x*log(x) - x >>> integrate(log(x), (x, 1, a)) a*log(a) - a + 1 >>> integrate(x) x**2/2 Terms that are independent of x are dropped by indefinite integration: >>> from sympy import sqrt >>> integrate(sqrt(1 + x), (x, 0, x)) 2*(x + 1)**(3/2)/3 - 2/3 >>> integrate(sqrt(1 + x), x) 2*(x + 1)**(3/2)/3 >>> integrate(x*y) Traceback (most recent call last): ... ValueError: specify integration variables to integrate x*y Note that ``integrate(x)`` syntax is meant only for convenience in interactive sessions and should be avoided in library code. >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise' Piecewise((gamma(a + 1), re(a) > -1), (Integral(x**a*exp(-x), (x, 0, oo)), True)) >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none') gamma(a + 1) >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate') (gamma(a + 1), re(a) > -1) See Also ======== Integral, Integral.doit
2,167
def parsed_paths(self) -> List[str]: res_paths: List[str] = [] paths = self.parser.existing_paths for directory in paths: for filename in paths[directory]: res_paths.append(os.path.join(directory, filename)) return res_paths
Returns a list of file paths that have currently been parsed into the parser tree. The returned list may include paths with wildcard characters, for example: ['/etc/apache2/conf.d/*.load'] This is typically called on the root node of the ParserNode tree. :returns: list of file paths of files that have been parsed
50
23
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parsed_paths(self) -> List[str]: res_paths: List[str] = [] paths = self.parser.existing_paths for directory in paths: for filename in paths[directory]: res_paths.append(os.path.join(directory, filename)) return res_paths ``` ###Assistant : Returns a list of file paths that have currently been parsed into the parser tree. The returned list may include paths with wildcard characters, for example: ['/etc/apache2/conf.d/*.load'] This is typically called on the root node of the ParserNode tree. :returns: list of file paths of files that have been parsed
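Since the returned list may contain wildcard entries such as '/etc/apache2/conf.d/*.load', a caller can expand them with stdlib glob; a small sketch:

```Python
import glob

def expand_parsed_paths(paths):
    expanded = []
    for p in paths:
        # expand wildcard entries; pass literal paths through unchanged
        expanded.extend(glob.glob(p) if "*" in p else [p])
    return expanded

print(expand_parsed_paths(["/etc/hosts", "/etc/apache2/conf.d/*.load"]))
```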
2,168
def _check_guts_toc_mtime(attr_name, old_toc, new_toc, last_build, pyc=False): for dest_name, src_name, typecode in old_toc: if misc.mtime(src_name) > last_build: logger.info("Building because %s changed", src_name) return True elif pyc and typecode == 'PYMODULE': py_filename = src_name[:-1] if misc.mtime(py_filename) > last_build: logger.info("Building because %s changed", py_filename) return True return False
Rebuild is required if mtimes of files listed in old TOC are newer than last_build. If pyc=True, check for .py files as well. Use this for calculated/analysed values read from cache.
31
45
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_guts_toc_mtime(attr_name, old_toc, new_toc, last_build, pyc=False): for dest_name, src_name, typecode in old_toc: if misc.mtime(src_name) > last_build: logger.info("Building because %s changed", src_name) return True elif pyc and typecode == 'PYMODULE': py_filename = src_name[:-1] if misc.mtime(py_filename) > last_build: logger.info("Building because %s changed", py_filename) return True return False ``` ###Assistant : Rebuild is required if mtimes of files listed in old TOC are newer than last_build. If pyc=True, check for .py files as well. Use this for calculated/analysed values read from cache.
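The core mtime comparison above, reduced to stdlib calls on a temporary file; os.path.getmtime stands in for misc.mtime, and the sleep is only there to beat coarse filesystem timestamp granularity.

```Python
import os
import tempfile
import time

last_build = time.time()
time.sleep(1.1)  # ensure the file's mtime lands after last_build
with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b"changed source")
src = f.name
print(os.path.getmtime(src) > last_build)  # True -> rebuild required
os.unlink(src)
```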
2,169
def check_connection(self) -> StatusResponse: response = StatusResponse(False) need_to_close = self.is_connected is False try: connection = self.connect() with connection.cursor() as cur: cur.execute('SELECT 1 FROM (SELECT 1 AS "dual") AS "dual"') response.success = True except teradatasql.Error as e: log.error(f'Error connecting to Teradata {self.host}, {e}!') response.error_message = e if response.success is True and need_to_close: self.disconnect() if response.success is False and self.is_connected is True: self.is_connected = False return response
Check the connection of the Teradata database :return: success status and error message if error occurs
16
65
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_connection(self) -> StatusResponse: response = StatusResponse(False) need_to_close = self.is_connected is False try: connection = self.connect() with connection.cursor() as cur: cur.execute('SELECT 1 FROM (SELECT 1 AS "dual") AS "dual"') response.success = True except teradatasql.Error as e: log.error(f'Error connecting to Teradata {self.host}, {e}!') response.error_message = e if response.success is True and need_to_close: self.disconnect() if response.success is False and self.is_connected is True: self.is_connected = False return response ``` ###Assistant : Check the connection of the Teradata database :return: success status and error message if error occurs
2,170
def process_doc_file(code_file, add_new_line=True): with open(code_file, "r", encoding="utf-8", newline="\n") as f: code = f.read() # fmt: off splits = code.split("```") if len(splits) % 2 != 1: raise ValueError("The number of occurrences of ``` should be an even number.") splits = [s if i % 2 == 0 else process_code_block(s, add_new_line=add_new_line) for i, s in enumerate(splits)] clean_code = "```".join(splits) # fmt: on diff = clean_code != code if diff: print(f"Overwriting content of {code_file}.") with open(code_file, "w", encoding="utf-8", newline="\n") as f: f.write(clean_code)
Process given file. Args: code_file (`str` or `os.PathLike`): The file in which we want to style the docstring.
18
79
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def process_doc_file(code_file, add_new_line=True): with open(code_file, "r", encoding="utf-8", newline="\n") as f: code = f.read() # fmt: off splits = code.split("```") if len(splits) % 2 != 1: raise ValueError("The number of occurrences of ``` should be an even number.") splits = [s if i % 2 == 0 else process_code_block(s, add_new_line=add_new_line) for i, s in enumerate(splits)] clean_code = "```".join(splits) # fmt: on diff = clean_code != code if diff: print(f"Overwriting content of {code_file}.") with open(code_file, "w", encoding="utf-8", newline="\n") as f: f.write(clean_code) ``` ###Assistant : Process given file. Args: code_file (`str` or `os.PathLike`): The file in which we want to style the docstring.
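The backtick-splitting invariant enforced above, shown on a small string: n fenced blocks yield 2*n fences, hence an odd number of splits, with the code blocks sitting at the odd indices.

```Python
code = "text\n```py\nblock\n```\nmore text\n"
splits = code.split("```")
print(len(splits))   # 3 -> odd, so the fences are balanced
print(splits[1::2])  # ['py\nblock\n'] -> the code blocks to restyle
```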
2,171
def finalize(self, batch): for face, landmarks in zip(batch["detected_faces"], batch["landmarks"]): if not isinstance(landmarks, np.ndarray): landmarks = np.array(landmarks) face._landmarks_xy = landmarks logger.trace("Item out: %s", {key: val.shape if isinstance(val, np.ndarray) else val for key, val in batch.items()}) for filename, face in zip(batch["filename"], batch["detected_faces"]): self._output_faces.append(face) if len(self._output_faces) != self._faces_per_filename[filename]: continue output = self._extract_media.pop(filename) output.add_detected_faces(self._output_faces) self._output_faces = [] logger.trace("Final Output: (filename: '%s', image shape: %s, detected_faces: %s, " "item: %s)", output.filename, output.image_shape, output.detected_faces, output) yield output # <<< PROTECTED METHODS >>> # # << PROCESS_INPUT WRAPPER >>
Finalize the output from Aligner This should be called as the final task of each `plugin`. Pairs the detected faces back up with their original frame before yielding each frame. Parameters ---------- batch : dict The final ``dict`` from the `plugin` process. It must contain the `keys`: ``detected_faces``, ``landmarks``, ``filename`` Yields ------ :class:`~plugins.extract.pipeline.ExtractMedia` The :attr:`DetectedFaces` list will be populated for this class with the bounding boxes and landmarks for the detected faces found in the frame.
76
82
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def finalize(self, batch): for face, landmarks in zip(batch["detected_faces"], batch["landmarks"]): if not isinstance(landmarks, np.ndarray): landmarks = np.array(landmarks) face._landmarks_xy = landmarks logger.trace("Item out: %s", {key: val.shape if isinstance(val, np.ndarray) else val for key, val in batch.items()}) for filename, face in zip(batch["filename"], batch["detected_faces"]): self._output_faces.append(face) if len(self._output_faces) != self._faces_per_filename[filename]: continue output = self._extract_media.pop(filename) output.add_detected_faces(self._output_faces) self._output_faces = [] logger.trace("Final Output: (filename: '%s', image shape: %s, detected_faces: %s, " "item: %s)", output.filename, output.image_shape, output.detected_faces, output) yield output # <<< PROTECTED METHODS >>> # # << PROCESS_INPUT WRAPPER >> ``` ###Assistant : Finalize the output from Aligner This should be called as the final task of each `plugin`. Pairs the detected faces back up with their original frame before yielding each frame. Parameters ---------- batch : dict The final ``dict`` from the `plugin` process. It must contain the `keys`: ``detected_faces``, ``landmarks``, ``filename`` Yields ------ :class:`~plugins.extract.pipeline.ExtractMedia` The :attr:`DetectedFaces` list will be populated for this class with the bounding boxes and landmarks for the detected faces found in the frame.
2,172
def find_dcerpc_interface(name): try: return next(x for x in DCE_RPC_INTERFACES.values() if x.name == name) except StopIteration: raise AttributeError("Unknown interface !") # --- NDR fields - [C706] chap 14
Find an interface object through the name in the IDL
10
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_dcerpc_interface(name): try: return next(x for x in DCE_RPC_INTERFACES.values() if x.name == name) except StopIteration: raise AttributeError("Unknown interface !") # --- NDR fields - [C706] chap 14 ``` ###Assistant : Find an interface object through the name in the IDL
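The same lookup-by-name idiom on a stand-in registry; scapy's real DCE_RPC_INTERFACES mapping is not reproduced here.

```Python
from types import SimpleNamespace

# invented registry entries, just to exercise the lookup pattern
REGISTRY = {
    1: SimpleNamespace(name="epmapper"),
    2: SimpleNamespace(name="samr"),
}

def find_by_name(name):
    try:
        return next(x for x in REGISTRY.values() if x.name == name)
    except StopIteration:
        raise AttributeError("Unknown interface !")

print(find_by_name("samr").name)  # 'samr'
```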
2,173
def test_animatable(): animatable = AnimateTest() # Fake wall-clock time time = 100.0 # Object that does the animation animation = SimpleAnimation( animatable, "bar", time, 3.0, start_value=Animatable(20.0), end_value=Animatable(50.0), final_value=Animatable(50.0), easing=lambda x: x, ) assert animation(time) is False assert animatable.bar.value == 20.0 assert animation(time + 1.0) is False assert animatable.bar.value == 30.0 assert animation(time + 2.0) is False assert animatable.bar.value == 40.0 assert animation(time + 2.9) is False assert pytest.approx(animatable.bar.value, 49.0) assert animation(time + 3.0) is True # True to indicate animation is complete assert animatable.bar.value == 50.0
Test SimpleAnimation works with the Animatable protocol
7
86
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_animatable(): animatable = AnimateTest() # Fake wall-clock time time = 100.0 # Object that does the animation animation = SimpleAnimation( animatable, "bar", time, 3.0, start_value=Animatable(20.0), end_value=Animatable(50.0), final_value=Animatable(50.0), easing=lambda x: x, ) assert animation(time) is False assert animatable.bar.value == 20.0 assert animation(time + 1.0) is False assert animatable.bar.value == 30.0 assert animation(time + 2.0) is False assert animatable.bar.value == 40.0 assert animation(time + 2.9) is False assert pytest.approx(animatable.bar.value, 49.0) assert animation(time + 3.0) is True # True to indicate animation is complete assert animatable.bar.value == 50.0 ``` ###Assistant : Test SimpleAnimation works with the Animatable protocol
2,174
def update(self, bbox, score, label, gt_box, gt_label, difficult=None): if difficult is None: difficult = np.zeros_like(gt_label) # record class gt count for gtl, diff in zip(gt_label, difficult): if self.evaluate_difficult or int(diff) == 0: self.class_gt_counts[int(np.array(gtl))] += 1 # record class score positive visited = [False] * len(gt_label) for b, s, l in zip(bbox, score, label): pred = b.tolist() if isinstance(b, np.ndarray) else b max_idx = -1 max_overlap = -1.0 for i, gl in enumerate(gt_label): if int(gl) == int(l): if len(gt_box[i]) == 8: overlap = calc_rbox_iou(pred, gt_box[i]) else: overlap = jaccard_overlap(pred, gt_box[i], self.is_bbox_normalized) if overlap > max_overlap: max_overlap = overlap max_idx = i if max_overlap > self.overlap_thresh: if self.evaluate_difficult or \ int(np.array(difficult[max_idx])) == 0: if not visited[max_idx]: self.class_score_poss[int(l)].append([s, 1.0]) visited[max_idx] = True else: self.class_score_poss[int(l)].append([s, 0.0]) else: self.class_score_poss[int(l)].append([s, 0.0])
Update metric statistics from given prediction and ground truth information.
10
125
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update(self, bbox, score, label, gt_box, gt_label, difficult=None): if difficult is None: difficult = np.zeros_like(gt_label) # record class gt count for gtl, diff in zip(gt_label, difficult): if self.evaluate_difficult or int(diff) == 0: self.class_gt_counts[int(np.array(gtl))] += 1 # record class score positive visited = [False] * len(gt_label) for b, s, l in zip(bbox, score, label): pred = b.tolist() if isinstance(b, np.ndarray) else b max_idx = -1 max_overlap = -1.0 for i, gl in enumerate(gt_label): if int(gl) == int(l): if len(gt_box[i]) == 8: overlap = calc_rbox_iou(pred, gt_box[i]) else: overlap = jaccard_overlap(pred, gt_box[i], self.is_bbox_normalized) if overlap > max_overlap: max_overlap = overlap max_idx = i if max_overlap > self.overlap_thresh: if self.evaluate_difficult or \ int(np.array(difficult[max_idx])) == 0: if not visited[max_idx]: self.class_score_poss[int(l)].append([s, 1.0]) visited[max_idx] = True else: self.class_score_poss[int(l)].append([s, 0.0]) else: self.class_score_poss[int(l)].append([s, 0.0]) ``` ###Assistant : Update metric statistics from given prediction and ground truth information.
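A plain-Python IoU in the spirit of the jaccard_overlap call above, for axis-aligned [x1, y1, x2, y2] boxes; the real helper also handles normalized coordinates, and rotated boxes go through calc_rbox_iou instead.

```Python
def iou(a, b):
    # intersection rectangle, clamped to zero when the boxes do not overlap
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    union = ((a[2] - a[0]) * (a[3] - a[1])
             + (b[2] - b[0]) * (b[3] - b[1]) - inter)
    return inter / union if union > 0 else 0.0

print(iou([0, 0, 2, 2], [1, 1, 3, 3]))  # 1/7 ~ 0.1429
```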
2,175
def _get_permission_objects_for_actions(self, actions): permission_codenames = [ "%s_%s" % (action, self.model_name) for action in actions ] return Permission.objects.filter( content_type=self._content_type, codename__in=permission_codenames )
Get a queryset of the Permission objects for the given actions
11
20
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_permission_objects_for_actions(self, actions): permission_codenames = [ "%s_%s" % (action, self.model_name) for action in actions ] return Permission.objects.filter( content_type=self._content_type, codename__in=permission_codenames ) ``` ###Assistant : Get a queryset of the Permission objects for the given actions
2,176
def createWindow(self, wintype): debug_type = debug.qenum_key(QWebEnginePage, wintype) background = config.val.tabs.background log.webview.debug("createWindow with type {}, background {}".format( debug_type, background)) if wintype == QWebEnginePage.WebWindowType.WebBrowserWindow: # Shift-Alt-Click target = usertypes.ClickTarget.window elif wintype == QWebEnginePage.WebWindowType.WebDialog: log.webview.warning("{} requested, but we don't support " "that!".format(debug_type)) target = usertypes.ClickTarget.tab elif wintype == QWebEnginePage.WebWindowType.WebBrowserTab: # Middle-click / Ctrl-Click with Shift # FIXME:qtwebengine this also affects target=_blank links... if background: target = usertypes.ClickTarget.tab else: target = usertypes.ClickTarget.tab_bg elif wintype == QWebEnginePage.WebWindowType.WebBrowserBackgroundTab: # Middle-click / Ctrl-Click if background: target = usertypes.ClickTarget.tab_bg else: target = usertypes.ClickTarget.tab else: raise ValueError("Invalid wintype {}".format(debug_type)) tab = shared.get_tab(self._win_id, target) return tab._widget # pylint: disable=protected-access
Called by Qt when a page wants to create a new window. This function is called from the createWindow() method of the associated QWebEnginePage, each time the page wants to create a new window of the given type. This might be the result, for example, of a JavaScript request to open a document in a new window. Args: wintype: This enum describes the types of window that can be created by the createWindow() function. QWebEnginePage::WebBrowserWindow: A complete web browser window. QWebEnginePage::WebBrowserTab: A web browser tab. QWebEnginePage::WebDialog: A window without decoration. QWebEnginePage::WebBrowserBackgroundTab: A web browser tab without hiding the current visible WebEngineView. Return: The new QWebEngineView object.
106
99
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def createWindow(self, wintype): debug_type = debug.qenum_key(QWebEnginePage, wintype) background = config.val.tabs.background log.webview.debug("createWindow with type {}, background {}".format( debug_type, background)) if wintype == QWebEnginePage.WebWindowType.WebBrowserWindow: # Shift-Alt-Click target = usertypes.ClickTarget.window elif wintype == QWebEnginePage.WebWindowType.WebDialog: log.webview.warning("{} requested, but we don't support " "that!".format(debug_type)) target = usertypes.ClickTarget.tab elif wintype == QWebEnginePage.WebWindowType.WebBrowserTab: # Middle-click / Ctrl-Click with Shift # FIXME:qtwebengine this also affects target=_blank links... if background: target = usertypes.ClickTarget.tab else: target = usertypes.ClickTarget.tab_bg elif wintype == QWebEnginePage.WebWindowType.WebBrowserBackgroundTab: # Middle-click / Ctrl-Click if background: target = usertypes.ClickTarget.tab_bg else: target = usertypes.ClickTarget.tab else: raise ValueError("Invalid wintype {}".format(debug_type)) tab = shared.get_tab(self._win_id, target) return tab._widget # pylint: disable=protected-access ``` ###Assistant : Called by Qt when a page wants to create a new window. This function is called from the createWindow() method of the associated QWebEnginePage, each time the page wants to create a new window of the given type. This might be the result, for example, of a JavaScript request to open a document in a new window. Args: wintype: This enum describes the types of window that can be created by the createWindow() function. QWebEnginePage::WebBrowserWindow: A complete web browser window. QWebEnginePage::WebBrowserTab: A web browser tab. QWebEnginePage::WebDialog: A window without decoration. QWebEnginePage::WebBrowserBackgroundTab: A web browser tab without hiding the current visible WebEngineView. Return: The new QWebEngineView object.
2,177
def predict(self, X): raw_predictions = self.decision_function(X) encoded_labels = self._loss._raw_prediction_to_decision(raw_predictions) return self.classes_.take(encoded_labels, axis=0)
Predict class for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : ndarray of shape (n_samples,) The predicted values.
47
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def predict(self, X): raw_predictions = self.decision_function(X) encoded_labels = self._loss._raw_prediction_to_decision(raw_predictions) return self.classes_.take(encoded_labels, axis=0) ``` ###Assistant : Predict class for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : ndarray of shape (n_samples,) The predicted values.
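Typical end-to-end usage of the predict() above, which appears to belong to sklearn's GradientBoostingClassifier given the decision_function/_loss plumbing:

```Python
from sklearn.datasets import load_iris
from sklearn.ensemble import GradientBoostingClassifier

X, y = load_iris(return_X_y=True)
clf = GradientBoostingClassifier(n_estimators=10, random_state=0).fit(X, y)
print(clf.predict(X[:3]))  # labels taken from clf.classes_ via the encoded argmax
```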
2,178
def union_all(graphs, rename=()): R = None seen_nodes = set() # rename graph to obtain disjoint node labels
Returns the union of all graphs. The graphs must be disjoint, otherwise an exception is raised. Parameters ---------- graphs : iterable Iterable of NetworkX graphs rename : iterable , optional Node names of graphs can be changed by specifying the tuple rename=('G-','H-') (for example). Node "u" in G is then renamed "G-u" and "v" in H is renamed "H-v". Infinite generators (like itertools.count) are also supported. Returns ------- U : a graph with the same type as the first graph in list Raises ------ ValueError If `graphs` is an empty list. Notes ----- To force a disjoint union with node relabeling, use disjoint_union_all(G,H) or convert_node_labels_to_integers(). Graph, edge, and node attributes are propagated to the union graph. If a graph attribute is present in multiple graphs, then the value from the last graph in the list with that attribute is used. See Also -------- union disjoint_union_all
146
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def union_all(graphs, rename=()): R = None seen_nodes = set() # rename graph to obtain disjoint node labels ``` ###Assistant : Returns the union of all graphs. The graphs must be disjoint, otherwise an exception is raised. Parameters ---------- graphs : iterable Iterable of NetworkX graphs rename : iterable, optional Node names of graphs can be changed by specifying the tuple rename=('G-','H-') (for example). Node "u" in G is then renamed "G-u" and "v" in H is renamed "H-v". Infinite generators (like itertools.count) are also supported. Returns ------- U : a graph with the same type as the first graph in the list Raises ------ ValueError If `graphs` is an empty list. Notes ----- To force a disjoint union with node relabeling, use disjoint_union_all(G,H) or convert_node_labels_to_integers(). Graph, edge, and node attributes are propagated to the union graph. If a graph attribute is present in multiple graphs, then the value from the last graph in the list with that attribute is used. See Also -------- union disjoint_union_all
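A short usage sketch of `union_all` with the `rename` option described in the docstring; the graphs are made up for illustration.

```Python
# Two graphs share node labels 0 and 1, so rename prefixes keep them disjoint.
import networkx as nx

G = nx.path_graph(2)  # nodes 0, 1
H = nx.path_graph(2)  # same labels as G
U = nx.union_all([G, H], rename=("G-", "H-"))
print(sorted(U.nodes()))  # ['G-0', 'G-1', 'H-0', 'H-1']
```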
2,179
def test_unknown_category_that_are_negative(): rng = np.random.RandomState(42) n_samples = 1000 X = np.c_[rng.rand(n_samples), rng.randint(4, size=n_samples)] y = np.zeros(shape=n_samples) y[X[:, 1] % 2 == 0] = 1 hist = HistGradientBoostingRegressor( random_state=0, categorical_features=[False, True], max_iter=10, ).fit(X, y) # Check that negative values from the second column are treated like a # missing category X_test_neg = np.asarray([[1, -2], [3, -4]]) X_test_nan = np.asarray([[1, np.nan], [3, np.nan]]) assert_allclose(hist.predict(X_test_neg), hist.predict(X_test_nan))
Check that unknown categories that are negative do not error. Non-regression test for #24274.
14
63
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_unknown_category_that_are_negative(): rng = np.random.RandomState(42) n_samples = 1000 X = np.c_[rng.rand(n_samples), rng.randint(4, size=n_samples)] y = np.zeros(shape=n_samples) y[X[:, 1] % 2 == 0] = 1 hist = HistGradientBoostingRegressor( random_state=0, categorical_features=[False, True], max_iter=10, ).fit(X, y) # Check that negative values from the second column are treated like a # missing category X_test_neg = np.asarray([[1, -2], [3, -4]]) X_test_nan = np.asarray([[1, np.nan], [3, np.nan]]) assert_allclose(hist.predict(X_test_neg), hist.predict(X_test_nan)) ``` ###Assistant : Check that unknown categories that are negative do not error. Non-regression test for #24274.
2,180
def forward(self, x_input, mask, cache=None): if isinstance(x_input, tuple): x, pos_emb = x_input[0], x_input[1] else: x, pos_emb = x_input, None # whether to use macaron style if self.feed_forward_macaron is not None: residual = x if self.normalize_before: x = self.norm_ff_macaron(x) x = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(x)) if not self.normalize_before: x = self.norm_ff_macaron(x) # multi-headed self-attention module residual = x if self.normalize_before: x = self.norm_mha(x) if cache is None: x_q = x else: assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size) x_q = x[:, -1:, :] residual = residual[:, -1:, :] mask = None if mask is None else mask[:, -1:, :] if pos_emb is not None: x_att = self.self_attn(x_q, x, x, pos_emb, mask) else: x_att = self.self_attn(x_q, x, x, mask) if self.concat_after: x_concat = torch.cat((x, x_att), dim=-1) x = residual + self.concat_linear(x_concat) else: x = residual + self.dropout(x_att) if not self.normalize_before: x = self.norm_mha(x) # convolution module if self.conv_module is not None: residual = x if self.normalize_before: x = self.norm_conv(x) x = residual + self.dropout(self.conv_module(x)) if not self.normalize_before: x = self.norm_conv(x) # feed forward module residual = x if self.normalize_before: x = self.norm_ff(x) x = residual + self.ff_scale * self.dropout(self.feed_forward(x)) if not self.normalize_before: x = self.norm_ff(x) if self.conv_module is not None: x = self.norm_final(x) if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask
Compute encoded features. :param torch.Tensor x_input: encoded source features, w/ or w/o pos_emb tuple((batch, max_time_in, size), (1, max_time_in, size)) or (batch, max_time_in, size) :param torch.Tensor mask: mask for x (batch, max_time_in) :param torch.Tensor cache: cache for x (batch, max_time_in - 1, size) :rtype: Tuple[torch.Tensor, torch.Tensor]
43
225
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def forward(self, x_input, mask, cache=None): if isinstance(x_input, tuple): x, pos_emb = x_input[0], x_input[1] else: x, pos_emb = x_input, None # whether to use macaron style if self.feed_forward_macaron is not None: residual = x if self.normalize_before: x = self.norm_ff_macaron(x) x = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(x)) if not self.normalize_before: x = self.norm_ff_macaron(x) # multi-headed self-attention module residual = x if self.normalize_before: x = self.norm_mha(x) if cache is None: x_q = x else: assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size) x_q = x[:, -1:, :] residual = residual[:, -1:, :] mask = None if mask is None else mask[:, -1:, :] if pos_emb is not None: x_att = self.self_attn(x_q, x, x, pos_emb, mask) else: x_att = self.self_attn(x_q, x, x, mask) if self.concat_after: x_concat = torch.cat((x, x_att), dim=-1) x = residual + self.concat_linear(x_concat) else: x = residual + self.dropout(x_att) if not self.normalize_before: x = self.norm_mha(x) # convolution module if self.conv_module is not None: residual = x if self.normalize_before: x = self.norm_conv(x) x = residual + self.dropout(self.conv_module(x)) if not self.normalize_before: x = self.norm_conv(x) # feed forward module residual = x if self.normalize_before: x = self.norm_ff(x) x = residual + self.ff_scale * self.dropout(self.feed_forward(x)) if not self.normalize_before: x = self.norm_ff(x) if self.conv_module is not None: x = self.norm_final(x) if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask ``` ###Assistant : Compute encoded features. :param torch.Tensor x_input: encoded source features, w/ or w/o pos_emb tuple((batch, max_time_in, size), (1, max_time_in, size)) or (batch, max_time_in, size) :param torch.Tensor mask: mask for x (batch, max_time_in) :param torch.Tensor cache: cache for x (batch, max_time_in - 1, size) :rtype: Tuple[torch.Tensor, torch.Tensor]
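The macaron-style branch at the top of the block is the least familiar part; here is a minimal PyTorch sketch of that half-step feed-forward residual, with illustrative layer sizes (the real module shapes are assumptions):

```Python
# Pre-norm macaron feed-forward: x + 0.5 * FFN(LayerNorm(x)).
import torch
import torch.nn as nn

size, hidden = 8, 32
norm = nn.LayerNorm(size)
feed_forward = nn.Sequential(nn.Linear(size, hidden), nn.ReLU(), nn.Linear(hidden, size))
ff_scale = 0.5  # half-step scaling used by Conformer-style blocks

x = torch.randn(2, 5, size)               # (batch, time, size)
x = x + ff_scale * feed_forward(norm(x))  # residual branch, shape preserved
print(x.shape)  # torch.Size([2, 5, 8])
```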
2,181
def test_prompt_invalid_template_format() -> None: template = "This is a {foo} test." input_variables = ["foo"] with pytest.raises(ValueError): Prompt( input_variables=input_variables, template=template, template_format="bar" )
Test initializing a prompt with invalid template format.
8
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_prompt_invalid_template_format() -> None: template = "This is a {foo} test." input_variables = ["foo"] with pytest.raises(ValueError): Prompt( input_variables=input_variables, template=template, template_format="bar" ) ``` ###Assistant : Test initializing a prompt with invalid template format.
2,182
def test_medium_does_not_exist(self) -> None: # test for unknown medium url = "/_synapse/admin/v1/threepid/publickey/users/unknown-key" channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) # test for unknown user with a known medium url = "/_synapse/admin/v1/threepid/email/users/unknown" channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
Tests that both a lookup for a medium that does not exist and a user that doesn't exist with that third party ID return a 404
26
48
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_medium_does_not_exist(self) -> None: # test for unknown medium url = "/_synapse/admin/v1/threepid/publickey/users/unknown-key" channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) # test for unknown user with a known medium url = "/_synapse/admin/v1/threepid/email/users/unknown" channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) ``` ###Assistant : Tests that both a lookup for a medium that does not exist and a user that doesn't exist with that third party ID return a 404
2,183
def deepspeed_config_process(self, prefix="", mismatches=None, config=None, must_match=True, **kwargs): mismatches = [] if mismatches is None else mismatches if config is None: config = self.deepspeed_config for key, value in config.items(): if isinstance(value, dict): self.deepspeed_config_process( prefix=prefix + key + ".", mismatches=mismatches, config=value, must_match=must_match, **kwargs ) else: self.fill_match(prefix + key, mismatches, must_match=must_match, **kwargs) if len(mismatches) > 0 and prefix == "": mismatches_msg = "\n".join(mismatches) raise ValueError( "Please correct the following DeepSpeed config values that mismatch kwargs " f" values:\n{mismatches_msg}\nThe easiest method is to set these DeepSpeed config values to 'auto'." ) @dataclass
Process the DeepSpeed config with the values from the kwargs.
10
88
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def deepspeed_config_process(self, prefix="", mismatches=None, config=None, must_match=True, **kwargs): mismatches = [] if mismatches is None else mismatches if config is None: config = self.deepspeed_config for key, value in config.items(): if isinstance(value, dict): self.deepspeed_config_process( prefix=prefix + key + ".", mismatches=mismatches, config=value, must_match=must_match, **kwargs ) else: self.fill_match(prefix + key, mismatches, must_match=must_match, **kwargs) if len(mismatches) > 0 and prefix == "": mismatches_msg = "\n".join(mismatches) raise ValueError( "Please correct the following DeepSpeed config values that mismatch kwargs " f" values:\n{mismatches_msg}\nThe easiest method is to set these DeepSpeed config values to 'auto'." ) @dataclass ``` ###Assistant : Process the DeepSpeed config with the values from the kwargs.
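The recursion in `deepspeed_config_process` amounts to a dotted-prefix walk over a nested dict; a standalone sketch on a made-up config:

```Python
# Walk a nested dict, printing each leaf under its dotted path.
def walk(config, prefix=""):
    for key, value in config.items():
        if isinstance(value, dict):
            walk(value, prefix + key + ".")
        else:
            print(prefix + key, "=", value)

walk({"optimizer": {"params": {"lr": "auto"}}, "train_batch_size": 32})
# optimizer.params.lr = auto
# train_batch_size = 32
```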
2,184
def get_metrics_result(self): # Collect metrics to return return_metrics = {} for metric in self.metrics: result = metric.result() if isinstance(result, dict): return_metrics.update(result) else: return_metrics[metric.name] = result return return_metrics
Returns the model's metrics values as a dict. If any of the metric results is a dict (containing multiple metrics), each of them gets added to the top-level dict returned by this method. Returns: A `dict` containing values of the metrics listed in `self.metrics`. Example: `{'loss': 0.2, 'accuracy': 0.7}`.
50
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_metrics_result(self): # Collect metrics to return return_metrics = {} for metric in self.metrics: result = metric.result() if isinstance(result, dict): return_metrics.update(result) else: return_metrics[metric.name] = result return return_metrics ``` ###Assistant : Returns the model's metrics values as a dict. If any of the metric results is a dict (containing multiple metrics), each of them gets added to the top-level dict returned by this method. Returns: A `dict` containing values of the metrics listed in `self.metrics`. Example: `{'loss': 0.2, 'accuracy': 0.7}`.
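A pure-Python sketch of the flattening behaviour the docstring describes — dict-valued metric results are merged into the top-level dict (the metric names and values here are invented):

```Python
# Flatten a mix of scalar and dict-valued metric results.
results = [("loss", 0.2), ("f1", {"precision": 0.8, "recall": 0.7})]
return_metrics = {}
for name, result in results:
    if isinstance(result, dict):
        return_metrics.update(result)  # dict results merge at the top level
    else:
        return_metrics[name] = result
print(return_metrics)  # {'loss': 0.2, 'precision': 0.8, 'recall': 0.7}
```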
2,185
def refactor_docstring(self, input, filename): result = [] block = None block_lineno = None indent = None lineno = 0 for line in input.splitlines(keepends=True): lineno += 1 if line.lstrip().startswith(self.PS1): if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) block_lineno = lineno block = [line] i = line.find(self.PS1) indent = line[:i] elif (indent is not None and (line.startswith(indent + self.PS2) or line == indent + self.PS2.rstrip() + "\n")): block.append(line) else: if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) block = None indent = None result.append(line) if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) return "".join(result)
Refactors a docstring, looking for doctests. This returns a modified version of the input string. It looks for doctests, which start with a ">>>" prompt, and may be continued with "..." prompts, as long as the "..." is indented the same as the ">>>". (Unfortunately we can't use the doctest module's parser, since, like most parsers, it is not geared towards preserving the original source.)
65
95
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def refactor_docstring(self, input, filename): result = [] block = None block_lineno = None indent = None lineno = 0 for line in input.splitlines(keepends=True): lineno += 1 if line.lstrip().startswith(self.PS1): if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) block_lineno = lineno block = [line] i = line.find(self.PS1) indent = line[:i] elif (indent is not None and (line.startswith(indent + self.PS2) or line == indent + self.PS2.rstrip() + "\n")): block.append(line) else: if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) block = None indent = None result.append(line) if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) return "".join(result) ``` ###Assistant : Refactors a docstring, looking for doctests. This returns a modified version of the input string. It looks for doctests, which start with a ">>>" prompt, and may be continued with "..." prompts, as long as the "..." is indented the same as the ">>>". (Unfortunately we can't use the doctest module's parser, since, like most parsers, it is not geared towards preserving the original source.)
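A simplified, self-contained sketch of the prompt detection above: a block starts at a `>>>` line and continues on `...` lines with the same indentation (the refactoring step itself is omitted).

```Python
# Group ">>>" prompt lines with their "..." continuations.
PS1, PS2 = ">>>", "..."
lines = ["text", "    >>> 1 + 1", "    ... + 1", "    3", "more text"]

blocks, block, indent = [], None, None
for line in lines:
    if line.lstrip().startswith(PS1):
        indent = line[: line.find(PS1)]
        block = [line]
        blocks.append(block)
    elif block is not None and line.startswith(indent + PS2):
        block.append(line)
    else:
        block = None  # an output or plain-text line ends the block
print(blocks)  # [['    >>> 1 + 1', '    ... + 1']]
```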
2,186
def test_complex_reversed_dag(self, test_complex_taskgroup_dag, complex_dag_expected_edges): ( dag, group, ( group_dm1, group_dm2, group_dm3, dm_in1, dm_in2, dm_in3, dm_in4, dm_out1, dm_out2, dm_out3, dm_out4, op_in1, op_out1, ), ) = test_complex_taskgroup_dag group_dm1 << [group_dm2, group_dm3] group << dm_in1 group << Label('label dm_in2 <=> group') << dm_in2 group << Label('label dm_in3/dm_in4 <=> group') << [dm_in3, dm_in4] group << Label('label op_in1 <=> group') << XComArg(op_in1, 'test_key') dm_out1 << group dm_out2 << Label('label group <=> dm_out2') << group [dm_out3, dm_out4] << Label('label group <=> dm_out3/dm_out4') << group XComArg(op_out1, 'test_key') << Label('label group <=> op_out1') << group compare_dag_edges(dag_edges(dag), complex_dag_expected_edges)
Tests the complex reversed dag with a TaskGroup and a Label
11
89
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_complex_reversed_dag(self, test_complex_taskgroup_dag, complex_dag_expected_edges): ( dag, group, ( group_dm1, group_dm2, group_dm3, dm_in1, dm_in2, dm_in3, dm_in4, dm_out1, dm_out2, dm_out3, dm_out4, op_in1, op_out1, ), ) = test_complex_taskgroup_dag group_dm1 << [group_dm2, group_dm3] group << dm_in1 group << Label('label dm_in2 <=> group') << dm_in2 group << Label('label dm_in3/dm_in4 <=> group') << [dm_in3, dm_in4] group << Label('label op_in1 <=> group') << XComArg(op_in1, 'test_key') dm_out1 << group dm_out2 << Label('label group <=> dm_out2') << group [dm_out3, dm_out4] << Label('label group <=> dm_out3/dm_out4') << group XComArg(op_out1, 'test_key') << Label('label group <=> op_out1') << group compare_dag_edges(dag_edges(dag), complex_dag_expected_edges) ``` ###Assistant : Tests the complex reversed dag with a TaskGroup and a Label
2,187
def postprocess_response(token_ids, tokenizer): eos_pos = len(token_ids) for i, tok_id in enumerate(token_ids): if tok_id == tokenizer.sep_token_id: eos_pos = i break token_ids = token_ids[:eos_pos] tokens = tokenizer.convert_ids_to_tokens(token_ids) tokens = tokenizer.merge_subword(tokens) return tokens
Post-process the decoded sequence. Truncate from the first <eos>.
9
30
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def postprocess_response(token_ids, tokenizer): eos_pos = len(token_ids) for i, tok_id in enumerate(token_ids): if tok_id == tokenizer.sep_token_id: eos_pos = i break token_ids = token_ids[:eos_pos] tokens = tokenizer.convert_ids_to_tokens(token_ids) tokens = tokenizer.merge_subword(tokens) return tokens ``` ###Assistant : Post-process the decoded sequence. Truncate from the first <eos>.
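A minimal standalone version of the truncation, with a made-up separator id standing in for `tokenizer.sep_token_id`:

```Python
# Cut the sequence at the first separator (exclusive).
SEP_ID = 2
token_ids = [17, 42, 9, 2, 5, 5]

eos_pos = len(token_ids)
for i, tok_id in enumerate(token_ids):
    if tok_id == SEP_ID:
        eos_pos = i
        break
print(token_ids[:eos_pos])  # [17, 42, 9]
```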
2,188
def clip_grad_norm_(self, parameters, max_norm, norm_type=2): if self.distributed_type == DistributedType.FSDP: self.unscale_gradients() parameters = [p for p in parameters] for model in self._models: if parameters == [p for p in model.parameters()]: return model.clip_grad_norm_(max_norm, norm_type) elif self.distributed_type == DistributedType.DEEPSPEED: # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed # We cannot return the gradient norm because DeepSpeed does it. return None self.unscale_gradients() return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)
Should be used in place of `torch.nn.utils.clip_grad_norm_`. Returns: `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector). Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(gradient_accumulation_steps=2) >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) >>> for (input, target) in dataloader: ... optimizer.zero_grad() ... output = model(input) ... loss = loss_func(output, target) ... accelerator.backward(loss) ... if accelerator.sync_gradients: ... accelerator.clip_grad_norm_(model.parameters(), max_grad_norm) ... optimizer.step() ```
69
66
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def clip_grad_norm_(self, parameters, max_norm, norm_type=2): if self.distributed_type == DistributedType.FSDP: self.unscale_gradients() parameters = [p for p in parameters] for model in self._models: if parameters == [p for p in model.parameters()]: return model.clip_grad_norm_(max_norm, norm_type) elif self.distributed_type == DistributedType.DEEPSPEED: # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed # We cannot return the gradient norm because DeepSpeed does it. return None self.unscale_gradients() return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type) ``` ###Assistant : Should be used in place of `torch.nn.utils.clip_grad_norm_`. Returns: `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector). Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(gradient_accumulation_steps=2) >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) >>> for (input, target) in dataloader: ... optimizer.zero_grad() ... output = model(input) ... loss = loss_func(output, target) ... accelerator.backward(loss) ... if accelerator.sync_gradients: ... accelerator.clip_grad_norm_(model.parameters(), max_grad_norm) ... optimizer.step() ```
2,189
def check_library_list(self, libraries): if not isinstance(libraries, list): raise DistutilsSetupError( "'libraries' option must be a list of tuples") for lib in libraries: if not isinstance(lib, tuple) or len(lib) != 2: raise DistutilsSetupError( "each element of 'libraries' must be a 2-tuple") name, build_info = lib if not isinstance(name, str): raise DistutilsSetupError( "first element of each tuple in 'libraries' " "must be a string (the library name)") if '/' in name or (os.sep != '/' and os.sep in name): raise DistutilsSetupError("bad library name '%s': " "may not contain directory separators" % lib[0]) if not isinstance(build_info, dict): raise DistutilsSetupError( "second element of each tuple in 'libraries' " "must be a dictionary (build info)")
Ensure that the list of libraries is valid. `libraries` is presumably provided as a command option 'libraries'. This method checks that it is a list of 2-tuples, where the tuples are (library_name, build_info_dict). Raise DistutilsSetupError if the structure is invalid anywhere; just returns otherwise.
44
108
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_library_list(self, libraries): if not isinstance(libraries, list): raise DistutilsSetupError( "'libraries' option must be a list of tuples") for lib in libraries: if not isinstance(lib, tuple) or len(lib) != 2: raise DistutilsSetupError( "each element of 'libraries' must be a 2-tuple") name, build_info = lib if not isinstance(name, str): raise DistutilsSetupError( "first element of each tuple in 'libraries' " "must be a string (the library name)") if '/' in name or (os.sep != '/' and os.sep in name): raise DistutilsSetupError("bad library name '%s': " "may not contain directory separators" % lib[0]) if not isinstance(build_info, dict): raise DistutilsSetupError( "second element of each tuple in 'libraries' " "must be a dictionary (build info)") ``` ###Assistant : Ensure that the list of libraries is valid. `libraries` is presumably provided as a command option 'libraries'. This method checks that it is a list of 2-tuples, where the tuples are (library_name, build_info_dict). Raise DistutilsSetupError if the structure is invalid anywhere; just returns otherwise.
2,190
def _process_new_state(cls, new_state, unprocessed, processed): if isinstance(new_state, str): # an existing state if new_state == '#pop': return -1 elif new_state in unprocessed: return (new_state,) elif new_state == '#push': return new_state elif new_state[:5] == '#pop:': return -int(new_state[5:]) else: assert False, 'unknown new state %r' % new_state elif isinstance(new_state, combined): # combine a new state from existing ones tmp_state = '_tmp_%d' % cls._tmpname cls._tmpname += 1 itokens = [] for istate in new_state: assert istate != new_state, 'circular state ref %r' % istate itokens.extend(cls._process_state(unprocessed, processed, istate)) processed[tmp_state] = itokens return (tmp_state,) elif isinstance(new_state, tuple): # push more than one state for istate in new_state: assert (istate in unprocessed or istate in ('#pop', '#push')), \ 'unknown new state ' + istate return new_state else: assert False, 'unknown new state def %r' % new_state
Preprocess the state transition action of a token definition.
9
130
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _process_new_state(cls, new_state, unprocessed, processed): if isinstance(new_state, str): # an existing state if new_state == '#pop': return -1 elif new_state in unprocessed: return (new_state,) elif new_state == '#push': return new_state elif new_state[:5] == '#pop:': return -int(new_state[5:]) else: assert False, 'unknown new state %r' % new_state elif isinstance(new_state, combined): # combine a new state from existing ones tmp_state = '_tmp_%d' % cls._tmpname cls._tmpname += 1 itokens = [] for istate in new_state: assert istate != new_state, 'circular state ref %r' % istate itokens.extend(cls._process_state(unprocessed, processed, istate)) processed[tmp_state] = itokens return (tmp_state,) elif isinstance(new_state, tuple): # push more than one state for istate in new_state: assert (istate in unprocessed or istate in ('#pop', '#push')), \ 'unknown new state ' + istate return new_state else: assert False, 'unknown new state def %r' % new_state ``` ###Assistant : Preprocess the state transition action of a token definition.
2,191
def test_ohe_infrequent_two_levels_user_cats(): X_train = np.array( [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object ).T ohe = OneHotEncoder( categories=[["c", "d", "a", "b"]], sparse=False, handle_unknown="infrequent_if_exist", max_categories=2, ).fit(X_train) assert_array_equal(ohe.infrequent_categories_, [["c", "d", "a"]]) X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]) X_trans = ohe.transform(X_test) assert_allclose(expected, X_trans) # 'infrequent' is used to denote the infrequent categories for # `inverse_transform` expected_inv = [[col] for col in ["b"] + ["infrequent_sklearn"] * 4] X_inv = ohe.inverse_transform(X_trans) assert_array_equal(expected_inv, X_inv)
Test that the order of the categories provided by a user is respected.
13
89
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_ohe_infrequent_two_levels_user_cats(): X_train = np.array( [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object ).T ohe = OneHotEncoder( categories=[["c", "d", "a", "b"]], sparse=False, handle_unknown="infrequent_if_exist", max_categories=2, ).fit(X_train) assert_array_equal(ohe.infrequent_categories_, [["c", "d", "a"]]) X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]) X_trans = ohe.transform(X_test) assert_allclose(expected, X_trans) # 'infrequent' is used to denote the infrequent categories for # `inverse_transform` expected_inv = [[col] for col in ["b"] + ["infrequent_sklearn"] * 4] X_inv = ohe.inverse_transform(X_trans) assert_array_equal(expected_inv, X_inv) ``` ###Assistant : Test that the order of the categories provided by a user is respected.
2,192
def fit(self, X, y=None): self._validate_params() random_state = check_random_state(self.random_state) X = self._validate_data(X) self.mean_ = X.mean(axis=0) X = X - self.mean_ if self.n_components is None: n_components = X.shape[1] else: n_components = self.n_components return self._fit(X, n_components, random_state)
Fit the model from data in X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Returns the instance itself.
53
34
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fit(self, X, y=None): self._validate_params() random_state = check_random_state(self.random_state) X = self._validate_data(X) self.mean_ = X.mean(axis=0) X = X - self.mean_ if self.n_components is None: n_components = X.shape[1] else: n_components = self.n_components return self._fit(X, n_components, random_state) ``` ###Assistant : Fit the model from data in X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Returns the instance itself.
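The snippet does not identify the estimator class, so only the centering step can be illustrated without assumptions about `_fit`:

```Python
# Center the data as fit() does before fitting components.
import numpy as np

X = np.random.RandomState(0).rand(10, 3)
mean_ = X.mean(axis=0)
X_centered = X - mean_
print(np.allclose(X_centered.mean(axis=0), 0.0))  # True
```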
2,193
def test_create_profile(): invoke_and_assert( ["profile", "create", "foo"], expected_output=( f ), ) profiles = load_profiles() assert profiles["foo"] == Profile( name="foo", settings={}, source=PREFECT_PROFILES_PATH.value() )
Created profile 'foo'. Switch to your new profile with: prefect profile use 'foo' Or, to use it for a single command, include the `-p` option: prefect -p 'foo' config view
30
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_create_profile(): invoke_and_assert( ["profile", "create", "foo"], expected_output=( f ), ) profiles = load_profiles() assert profiles["foo"] == Profile( name="foo", settings={}, source=PREFECT_PROFILES_PATH.value() ) ``` ###Assistant : Created profile 'foo'. Switch to your new profile with: prefect profile use 'foo' Or, to use it for a single command, include the `-p` option: prefect -p 'foo' config view
2,194
def test_mixed_errorbar_polar_caps(): fig = plt.figure() ax = plt.subplot(111, projection='polar') # symmetric errorbars th_sym = [1, 2, 3] r_sym = [0.9]*3 ax.errorbar(th_sym, r_sym, xerr=0.35, yerr=0.2, fmt="o") # long errorbars th_long = [np.pi/2 + .1, np.pi + .1] r_long = [1.8, 2.2] ax.errorbar(th_long, r_long, xerr=0.8 * np.pi, yerr=0.15, fmt="o") # asymmetric errorbars th_asym = [4*np.pi/3 + .1, 5*np.pi/3 + .1, 2*np.pi-0.1] r_asym = [1.1]*3 xerr = [[.3, .3, .2], [.2, .3, .3]] yerr = [[.35, .5, .5], [.5, .35, .5]] ax.errorbar(th_asym, r_asym, xerr=xerr, yerr=yerr, fmt="o") # overlapping errorbar th_over = [2.1] r_over = [3.1] ax.errorbar(th_over, r_over, xerr=10, yerr=.2, fmt="o")
Mix several polar errorbar use cases in a single test figure. It is advisable to position individual points off the grid. If there are problems with reproducibility of this test, consider removing the grid.
33
97
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_mixed_errorbar_polar_caps(): fig = plt.figure() ax = plt.subplot(111, projection='polar') # symmetric errorbars th_sym = [1, 2, 3] r_sym = [0.9]*3 ax.errorbar(th_sym, r_sym, xerr=0.35, yerr=0.2, fmt="o") # long errorbars th_long = [np.pi/2 + .1, np.pi + .1] r_long = [1.8, 2.2] ax.errorbar(th_long, r_long, xerr=0.8 * np.pi, yerr=0.15, fmt="o") # asymmetric errorbars th_asym = [4*np.pi/3 + .1, 5*np.pi/3 + .1, 2*np.pi-0.1] r_asym = [1.1]*3 xerr = [[.3, .3, .2], [.2, .3, .3]] yerr = [[.35, .5, .5], [.5, .35, .5]] ax.errorbar(th_asym, r_asym, xerr=xerr, yerr=yerr, fmt="o") # overlapping errorbar th_over = [2.1] r_over = [3.1] ax.errorbar(th_over, r_over, xerr=10, yerr=.2, fmt="o") ``` ###Assistant : Mix several polar errorbar use cases in a single test figure. It is advisable to position individual points off the grid. If there are problems with reproducibility of this test, consider removing the grid.
2,195
def test_edgeql_for_01(self): self.assert_test_query( r, {(1, 1), (2, 2), (3, 3)}, )
FOR X IN {1,2,3} UNION ((SELECT X), (SELECT X));
9
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_edgeql_for_01(self): self.assert_test_query( r, {(1, 1), (2, 2), (3, 3)}, ) ``` ###Assistant : FOR X IN {1,2,3} UNION ((SELECT X), (SELECT X));
2,196
def on_clicked(self, index): if not index.isValid(): return item = self._model().data(index, downloads.ModelRole.item) if item.done and item.successful: item.open_file() item.remove()
Handle clicking of an item. Args: index: The QModelIndex of the clicked item.
13
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def on_clicked(self, index): if not index.isValid(): return item = self._model().data(index, downloads.ModelRole.item) if item.done and item.successful: item.open_file() item.remove() ``` ###Assistant : Handle clicking of an item. Args: index: The QModelIndex of the clicked item.
2,197
def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) self.bias = self.decoder.bias @add_start_docstrings( , XLM_ROBERTA_XL_START_DOCSTRING, )
XLM-RoBERTa-xlarge Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.
23
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) self.bias = self.decoder.bias @add_start_docstrings( , XLM_ROBERTA_XL_START_DOCSTRING, ) ``` ###Assistant : XLM-RoBERTa-xlarge Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.
2,198
def matches_minor(self, other): return (self.major, self.minor) == (other.major, other.minor)
Check whether this version matches the other in (major, minor).
10
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def matches_minor(self, other): return (self.major, self.minor) == (other.major, other.minor) ``` ###Assistant : Check whether this version matches the other in (major, minor).
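A tiny illustration of the `(major, minor)` comparison, using a hypothetical namedtuple in place of the real version class:

```Python
# Micro versions differ but (major, minor) match.
from collections import namedtuple

Version = namedtuple("Version", "major minor micro")
a, b = Version(3, 10, 1), Version(3, 10, 7)
print((a.major, a.minor) == (b.major, b.minor))  # True
```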
2,199
def test_04_query_predictor_single_where_condition(self): time.sleep(120) # TODO query = f response = self.handler.native_query(query) self.assertTrue(response.type == RESPONSE_TYPE.TABLE) self.assertTrue(len(response.data_frame) == 1) self.assertTrue(response.data_frame['sqft'][0] == 100) self.assertTrue(response.data_frame['rental_price'][0] is not None)
SELECT target from {self.test_model_1} WHERE sqft=100
6
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_04_query_predictor_single_where_condition(self): time.sleep(120) # TODO query = f response = self.handler.native_query(query) self.assertTrue(response.type == RESPONSE_TYPE.TABLE) self.assertTrue(len(response.data_frame) == 1) self.assertTrue(response.data_frame['sqft'][0] == 100) self.assertTrue(response.data_frame['rental_price'][0] is not None) ``` ###Assistant : SELECT target from {self.test_model_1} WHERE sqft=100